diff --git a/sys/contrib/openzfs/.github/workflows/checkstyle.yaml b/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
index 33276d269092..14a921099e30 100644
--- a/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
+++ b/sys/contrib/openzfs/.github/workflows/checkstyle.yaml
@@ -1,50 +1,50 @@
name: checkstyle
on:
push:
pull_request:
jobs:
checkstyle:
runs-on: ubuntu-18.04
steps:
- uses: actions/checkout@v2
with:
ref: ${{ github.event.pull_request.head.sha }}
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install --yes -qq build-essential autoconf libtool gawk alien fakeroot linux-headers-$(uname -r)
sudo apt-get install --yes -qq zlib1g-dev uuid-dev libattr1-dev libblkid-dev libselinux-dev libudev-dev libssl-dev python-dev python-setuptools python-cffi python3 python3-dev python3-setuptools python3-cffi
# packages for tests
sudo apt-get install --yes -qq parted lsscsi ksh attr acl nfs-kernel-server fio
- sudo apt-get install --yes -qq mandoc cppcheck pax-utils devscripts abigail-tools
+ sudo apt-get install --yes -qq mandoc cppcheck pax-utils devscripts
sudo -E pip --quiet install flake8
- name: Prepare
run: |
sh ./autogen.sh
./configure
make -j$(nproc)
- name: Checkstyle
run: |
make checkstyle
- name: Lint
run: |
make lint
- name: CheckABI
id: CheckABI
run: |
- make checkabi
+ sudo docker run -v $(pwd):/source ghcr.io/openzfs/libabigail make checkabi
- name: StoreABI
if: failure() && steps.CheckABI.outcome == 'failure'
run: |
- make storeabi
+ sudo docker run -v $(pwd):/source ghcr.io/openzfs/libabigail make storeabi
- name: Prepare artifacts
if: failure() && steps.CheckABI.outcome == 'failure'
run: |
find -name *.abi | tar -cf abi_files.tar -T -
- uses: actions/upload-artifact@v2
if: failure() && steps.CheckABI.outcome == 'failure'
with:
name: New ABI files (use only if you're sure about interface changes)
path: abi_files.tar
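
The change above drops the Ubuntu abigail-tools package and instead runs the ABI targets inside the pinned ghcr.io/openzfs/libabigail container, so every CI run uses the same libabigail release. A minimal sketch of reproducing the step locally, assuming Docker is available and the tree has already been built (./autogen.sh, ./configure, make):

# Run the ABI comparison with the same pinned libabigail image as CI.
sudo docker run -v "$(pwd)":/source ghcr.io/openzfs/libabigail make checkabi

# After an intentional interface change, regenerate the dumps and collect
# them the same way the workflow's artifact step does.
sudo docker run -v "$(pwd)":/source ghcr.io/openzfs/libabigail make storeabi
find . -name '*.abi' | tar -cf abi_files.tar -T -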
diff --git a/sys/contrib/openzfs/Makefile.am b/sys/contrib/openzfs/Makefile.am
index 4e7e29589fc0..060729642533 100644
--- a/sys/contrib/openzfs/Makefile.am
+++ b/sys/contrib/openzfs/Makefile.am
@@ -1,225 +1,235 @@
include $(top_srcdir)/config/Shellcheck.am
ACLOCAL_AMFLAGS = -I config
SUBDIRS = include
if BUILD_LINUX
SUBDIRS += rpm
endif
if CONFIG_USER
SUBDIRS += man scripts lib tests cmd etc contrib
if BUILD_LINUX
SUBDIRS += udev
endif
endif
if CONFIG_KERNEL
SUBDIRS += module
extradir = $(prefix)/src/zfs-$(VERSION)
extra_HEADERS = zfs.release.in zfs_config.h.in
if BUILD_LINUX
kerneldir = $(prefix)/src/zfs-$(VERSION)/$(LINUX_VERSION)
nodist_kernel_HEADERS = zfs.release zfs_config.h module/$(LINUX_SYMBOLS)
endif
endif
AUTOMAKE_OPTIONS = foreign
EXTRA_DIST = autogen.sh copy-builtin
EXTRA_DIST += config/config.awk config/rpm.am config/deb.am config/tgz.am
EXTRA_DIST += AUTHORS CODE_OF_CONDUCT.md COPYRIGHT LICENSE META NEWS NOTICE
EXTRA_DIST += README.md RELEASES.md
EXTRA_DIST += module/lua/README.zfs module/os/linux/spl/README.md
# Include all the extra licensing information for modules
EXTRA_DIST += module/icp/algs/skein/THIRDPARTYLICENSE
EXTRA_DIST += module/icp/algs/skein/THIRDPARTYLICENSE.descrip
EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman
EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.gladman.descrip
EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl
EXTRA_DIST += module/icp/asm-x86_64/aes/THIRDPARTYLICENSE.openssl.descrip
EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.cryptogams
EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.cryptogams.descrip
EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.openssl
EXTRA_DIST += module/icp/asm-x86_64/modes/THIRDPARTYLICENSE.openssl.descrip
EXTRA_DIST += module/os/linux/spl/THIRDPARTYLICENSE.gplv2
EXTRA_DIST += module/os/linux/spl/THIRDPARTYLICENSE.gplv2.descrip
EXTRA_DIST += module/zfs/THIRDPARTYLICENSE.cityhash
EXTRA_DIST += module/zfs/THIRDPARTYLICENSE.cityhash.descrip
@CODE_COVERAGE_RULES@
GITREV = include/zfs_gitrev.h
PHONY = gitrev
gitrev:
$(AM_V_GEN)$(top_srcdir)/scripts/make_gitrev.sh $(GITREV)
all: gitrev
# Double-colon rules are allowed; there are multiple independent definitions.
maintainer-clean-local::
-$(RM) $(GITREV)
distclean-local::
-$(RM) -R autom4te*.cache build
-find . \( -name SCCS -o -name BitKeeper -o -name .svn -o -name CVS \
-o -name .pc -o -name .hg -o -name .git \) -prune -o \
\( -name '*.orig' -o -name '*.rej' -o -name '*~' \
-o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
-o -name '.*.rej' -o -size 0 -o -name '*%' -o -name '.*.cmd' \
-o -name 'core' -o -name 'Makefile' -o -name 'Module.symvers' \
-o -name '*.order' -o -name '*.markers' -o -name '*.gcda' \
-o -name '*.gcno' \) \
-type f -print | xargs $(RM)
all-local:
-[ -x ${top_builddir}/scripts/zfs-tests.sh ] && \
${top_builddir}/scripts/zfs-tests.sh -c
dist-hook:
$(AM_V_GEN)$(top_srcdir)/scripts/make_gitrev.sh -D $(distdir) $(GITREV)
$(SED) ${ac_inplace} -e 's/Release:[[:print:]]*/Release: $(RELEASE)/' \
$(distdir)/META
if BUILD_LINUX
# For compatibility, create a matching spl-x.y.z directory which contains
# symlinks to the updated header and object file locations. These
# compatibility links will be removed in the next major release.
if CONFIG_KERNEL
install-data-hook:
rm -rf $(DESTDIR)$(prefix)/src/spl-$(VERSION) && \
mkdir $(DESTDIR)$(prefix)/src/spl-$(VERSION) && \
cd $(DESTDIR)$(prefix)/src/spl-$(VERSION) && \
ln -s ../zfs-$(VERSION)/include/spl include && \
ln -s ../zfs-$(VERSION)/$(LINUX_VERSION) $(LINUX_VERSION) && \
ln -s ../zfs-$(VERSION)/zfs_config.h.in spl_config.h.in && \
ln -s ../zfs-$(VERSION)/zfs.release.in spl.release.in && \
cd $(DESTDIR)$(prefix)/src/zfs-$(VERSION)/$(LINUX_VERSION) && \
ln -fs zfs_config.h spl_config.h && \
ln -fs zfs.release spl.release
endif
endif
PHONY += codecheck
codecheck: cstyle shellcheck checkbashisms flake8 mancheck testscheck vcscheck
PHONY += checkstyle
checkstyle: codecheck commitcheck
PHONY += commitcheck
commitcheck:
@if git rev-parse --git-dir > /dev/null 2>&1; then \
${top_srcdir}/scripts/commitcheck.sh; \
fi
PHONY += cstyle
cstyle:
@find ${top_srcdir} -name build -prune \
-o -type f -name '*.[hc]' \
! -name 'zfs_config.*' ! -name '*.mod.c' \
! -name 'opt_global.h' ! -name '*_if*.h' \
! -path './module/zstd/lib/*' \
-exec ${top_srcdir}/scripts/cstyle.pl -cpP {} \+
filter_executable = -exec test -x '{}' \; -print
SHELLCHECKDIRS = cmd contrib etc scripts tests
SHELLCHECKSCRIPTS = autogen.sh
PHONY += checkabi storeabi
-checkabi: lib
+
+checklibabiversion:
+ libabiversion=`abidw -v | $(SED) 's/[^0-9]//g'`; \
+ if test $$libabiversion -lt "180"; then \
+ /bin/echo -e "\n" \
+ "*** Please use libabigail 1.8.0 version or newer;\n" \
+ "*** otherwise results are not consistent!\n"; \
+ exit 1; \
+ fi;
+
+checkabi: checklibabiversion lib
$(MAKE) -C lib checkabi
-storeabi: lib
+storeabi: checklibabiversion lib
$(MAKE) -C lib storeabi
PHONY += mancheck
mancheck:
${top_srcdir}/scripts/mancheck.sh ${top_srcdir}/man ${top_srcdir}/tests/test-runner/man
if BUILD_LINUX
stat_fmt = -c '%A %n'
else
stat_fmt = -f '%Sp %N'
endif
PHONY += testscheck
testscheck:
@find ${top_srcdir}/tests/zfs-tests -type f \
\( -name '*.ksh' -not ${filter_executable} \) -o \
\( -name '*.kshlib' ${filter_executable} \) -o \
\( -name '*.shlib' ${filter_executable} \) -o \
\( -name '*.cfg' ${filter_executable} \) | \
xargs -r stat ${stat_fmt} | \
awk '{c++; print} END {if(c>0) exit 1}'
PHONY += vcscheck
vcscheck:
@if git rev-parse --git-dir > /dev/null 2>&1; then \
git ls-files . --exclude-standard --others | \
awk '{c++; print} END {if(c>0) exit 1}' ; \
fi
PHONY += lint
lint: cppcheck paxcheck
CPPCHECKDIRS = cmd lib module
PHONY += cppcheck
cppcheck: $(CPPCHECKDIRS)
@if test -n "$(CPPCHECK)"; then \
set -e ; for dir in $(CPPCHECKDIRS) ; do \
$(MAKE) -C $$dir cppcheck ; \
done \
else \
echo "skipping cppcheck because cppcheck is not installed"; \
fi
PHONY += paxcheck
paxcheck:
@if type scanelf > /dev/null 2>&1; then \
${top_srcdir}/scripts/paxcheck.sh ${top_builddir}; \
else \
echo "skipping paxcheck because scanelf is not installed"; \
fi
PHONY += flake8
flake8:
@if type flake8 > /dev/null 2>&1; then \
flake8 ${top_srcdir}; \
else \
echo "skipping flake8 because flake8 is not installed"; \
fi
PHONY += ctags
ctags:
$(RM) tags
find $(top_srcdir) -name '.?*' -prune \
-o -type f -name '*.[hcS]' -print | xargs ctags -a
PHONY += etags
etags:
$(RM) TAGS
find $(top_srcdir) -name '.?*' -prune \
-o -type f -name '*.[hcS]' -print | xargs etags -a
PHONY += cscopelist
cscopelist:
find $(top_srcdir) -name '.?*' -prune \
-o -type f -name '*.[hc]' -print >cscope.files
PHONY += tags
tags: ctags etags
PHONY += pkg pkg-dkms pkg-kmod pkg-utils
pkg: @DEFAULT_PACKAGE@
pkg-dkms: @DEFAULT_PACKAGE@-dkms
pkg-kmod: @DEFAULT_PACKAGE@-kmod
pkg-utils: @DEFAULT_PACKAGE@-utils
include config/rpm.am
include config/deb.am
include config/tgz.am
.PHONY: $(PHONY)
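
The new checklibabiversion prerequisite gates both checkabi and storeabi on libabigail 1.8.0 or newer so that ABI dumps stay comparable across machines. A standalone sketch of the same gate, assuming abidw -v prints a version string such as "abidw 1.8.2":

# Strip non-digits from the reported version and require at least 1.8.0 (180).
libabiversion=$(abidw -v | sed 's/[^0-9]//g')
if [ "$libabiversion" -lt 180 ]; then
    echo "Please use libabigail 1.8.0 or newer; otherwise results are not consistent." >&2
    exit 1
fi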
diff --git a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
index f59571ace6bc..434d53cbad04 100644
--- a/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
+++ b/sys/contrib/openzfs/cmd/mount_zfs/mount_zfs.c
@@ -1,388 +1,388 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011 Lawrence Livermore National Security, LLC.
*/
#include <libintl.h>
#include <unistd.h>
#include <sys/file.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/stat.h>
#include <libzfs.h>
#include <libzutil.h>
#include <locale.h>
#include <getopt.h>
#include <fcntl.h>
#include <errno.h>
#define ZS_COMMENT 0x00000000 /* comment */
#define ZS_ZFSUTIL 0x00000001 /* caller is zfs(8) */
libzfs_handle_t *g_zfs;
/*
* Opportunistically convert a target string into a pool name. If the
* string does not represent a block device with a valid zfs label
* then it is passed through without modification.
*/
static void
parse_dataset(const char *target, char **dataset)
{
/*
* Prior to util-linux 2.36.2, if a file or directory in the
* current working directory was named 'dataset' then mount(8)
* would prepend the current working directory to the dataset.
* Check for it and strip the prepended path when it is added.
*/
char cwd[PATH_MAX];
if (getcwd(cwd, PATH_MAX) == NULL) {
perror("getcwd");
return;
}
int len = strlen(cwd);
if (strncmp(cwd, target, len) == 0)
target += len;
/* Assume pool/dataset is more likely */
strlcpy(*dataset, target, PATH_MAX);
int fd = open(target, O_RDONLY | O_CLOEXEC);
if (fd < 0)
return;
nvlist_t *cfg = NULL;
if (zpool_read_label(fd, &cfg, NULL) == 0) {
char *nm = NULL;
if (!nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &nm))
strlcpy(*dataset, nm, PATH_MAX);
nvlist_free(cfg);
}
if (close(fd))
perror("close");
}
/*
* Update the mtab_* code to use the libmount library when it is commonly
* available; otherwise fall back to legacy mode. The mount(8) utility will
* manage the lock file for us to prevent racing updates to /etc/mtab.
*/
static int
mtab_is_writeable(void)
{
struct stat st;
int error, fd;
error = lstat("/etc/mtab", &st);
if (error || S_ISLNK(st.st_mode))
return (0);
fd = open("/etc/mtab", O_RDWR | O_CREAT, 0644);
if (fd < 0)
return (0);
close(fd);
return (1);
}
static int
mtab_update(char *dataset, char *mntpoint, char *type, char *mntopts)
{
struct mntent mnt;
FILE *fp;
int error;
mnt.mnt_fsname = dataset;
mnt.mnt_dir = mntpoint;
mnt.mnt_type = type;
mnt.mnt_opts = mntopts ? mntopts : "";
mnt.mnt_freq = 0;
mnt.mnt_passno = 0;
fp = setmntent("/etc/mtab", "a+");
if (!fp) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
"could not be opened due to error: %s\n"),
dataset, strerror(errno));
return (MOUNT_FILEIO);
}
error = addmntent(fp, &mnt);
if (error) {
(void) fprintf(stderr, gettext(
"filesystem '%s' was mounted, but /etc/mtab "
"could not be updated due to error: %s\n"),
dataset, strerror(errno));
return (MOUNT_FILEIO);
}
(void) endmntent(fp);
return (MOUNT_SUCCESS);
}
int
main(int argc, char **argv)
{
zfs_handle_t *zhp;
char prop[ZFS_MAXPROPLEN];
uint64_t zfs_version = 0;
char mntopts[MNT_LINE_MAX] = { '\0' };
char badopt[MNT_LINE_MAX] = { '\0' };
char mtabopt[MNT_LINE_MAX] = { '\0' };
char mntpoint[PATH_MAX];
char dataset[PATH_MAX], *pdataset = dataset;
unsigned long mntflags = 0, zfsflags = 0, remount = 0;
int sloppy = 0, fake = 0, verbose = 0, nomtab = 0, zfsutil = 0;
int error, c;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/* check options */
while ((c = getopt_long(argc, argv, "sfnvo:h?", 0, 0)) != -1) {
switch (c) {
case 's':
sloppy = 1;
break;
case 'f':
fake = 1;
break;
case 'n':
nomtab = 1;
break;
case 'v':
verbose++;
break;
case 'o':
(void) strlcpy(mntopts, optarg, sizeof (mntopts));
break;
case 'h':
case '?':
if (optopt)
(void) fprintf(stderr,
gettext("Invalid option '%c'\n"), optopt);
(void) fprintf(stderr, gettext("Usage: mount.zfs "
"[-sfnvh] [-o options] <dataset> <mountpoint>\n"));
return (MOUNT_USAGE);
}
}
argc -= optind;
argv += optind;
/* check that we only have two arguments */
if (argc != 2) {
if (argc == 0)
(void) fprintf(stderr, gettext("missing dataset "
"argument\n"));
else if (argc == 1)
(void) fprintf(stderr,
gettext("missing mountpoint argument\n"));
else
(void) fprintf(stderr, gettext("too many arguments\n"));
(void) fprintf(stderr, "usage: mount <dataset> <mountpoint>\n");
return (MOUNT_USAGE);
}
parse_dataset(argv[0], &pdataset);
/* canonicalize the mount point */
if (realpath(argv[1], mntpoint) == NULL) {
(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
"mounted at '%s' due to canonicalization error: %s\n"),
dataset, argv[1], strerror(errno));
return (MOUNT_SYSERR);
}
/* validate mount options and set mntflags */
error = zfs_parse_mount_options(mntopts, &mntflags, &zfsflags, sloppy,
badopt, mtabopt);
if (error) {
switch (error) {
case ENOMEM:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to a memory allocation "
"failure.\n"), dataset);
return (MOUNT_SYSERR);
case ENOENT:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to invalid option "
"'%s'.\n"), dataset, badopt);
(void) fprintf(stderr, gettext("Use the '-s' option "
"to ignore the bad mount option.\n"));
return (MOUNT_USAGE);
default:
(void) fprintf(stderr, gettext("filesystem '%s' "
"cannot be mounted due to internal error %d.\n"),
dataset, error);
return (MOUNT_SOFTWARE);
}
}
if (verbose)
(void) fprintf(stdout, gettext("mount.zfs:\n"
" dataset: \"%s\"\n mountpoint: \"%s\"\n"
" mountflags: 0x%lx\n zfsflags: 0x%lx\n"
" mountopts: \"%s\"\n mtabopts: \"%s\"\n"),
dataset, mntpoint, mntflags, zfsflags, mntopts, mtabopt);
if (mntflags & MS_REMOUNT) {
nomtab = 1;
remount = 1;
}
if (zfsflags & ZS_ZFSUTIL)
zfsutil = 1;
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (MOUNT_SYSERR);
}
/* try to open the dataset to access the mount point */
if ((zhp = zfs_open(g_zfs, dataset,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT)) == NULL) {
(void) fprintf(stderr, gettext("filesystem '%s' cannot be "
"mounted, unable to open the dataset\n"), dataset);
libzfs_fini(g_zfs);
return (MOUNT_USAGE);
}
zfs_adjust_mount_options(zhp, mntpoint, mntopts, mtabopt);
/* treat all snapshots as legacy mount points */
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT)
(void) strlcpy(prop, ZFS_MOUNTPOINT_LEGACY, ZFS_MAXPROPLEN);
else
(void) zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, prop,
sizeof (prop), NULL, NULL, 0, B_FALSE);
/*
* Fetch the max supported zfs version in case we get ENOTSUP
* back from the mount command, since we need the zfs handle
* to do so.
*/
zfs_version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (zfs_version == 0) {
fprintf(stderr, gettext("unable to fetch "
"ZFS version for filesystem '%s'\n"), dataset);
return (MOUNT_SYSERR);
}
zfs_close(zhp);
libzfs_fini(g_zfs);
/*
* Legacy mount points may only be mounted using 'mount', never using
* 'zfs mount'. However, since 'zfs mount' actually invokes 'mount',
* we differentiate the two cases using the 'zfsutil' mount option.
* This mount option should only be supplied by the 'zfs mount' util.
*
* The only exception to the above rule is '-o remount' which is
* always allowed for non-legacy datasets. This is done because when
* using zfs as your root file system both rc.sysinit/umountroot and
* systemd depend on 'mount -o remount <mountpoint>' to work.
*/
if (zfsutil && (strcmp(prop, ZFS_MOUNTPOINT_LEGACY) == 0)) {
(void) fprintf(stderr, gettext(
"filesystem '%s' cannot be mounted using 'zfs mount'.\n"
"Use 'zfs set mountpoint=%s' or 'mount -t zfs %s %s'.\n"
"See zfs(8) for more information.\n"),
dataset, mntpoint, dataset, mntpoint);
return (MOUNT_USAGE);
}
if (!zfsutil && !(remount || fake) &&
strcmp(prop, ZFS_MOUNTPOINT_LEGACY)) {
(void) fprintf(stderr, gettext(
"filesystem '%s' cannot be mounted using 'mount'.\n"
"Use 'zfs set mountpoint=%s' or 'zfs mount %s'.\n"
"See zfs(8) for more information.\n"),
dataset, "legacy", dataset);
return (MOUNT_USAGE);
}
if (!fake) {
error = mount(dataset, mntpoint, MNTTYPE_ZFS,
mntflags, mntopts);
}
if (error) {
switch (errno) {
case ENOENT:
(void) fprintf(stderr, gettext("mount point "
"'%s' does not exist\n"), mntpoint);
return (MOUNT_SYSERR);
case EBUSY:
(void) fprintf(stderr, gettext("filesystem "
"'%s' is already mounted\n"), dataset);
return (MOUNT_BUSY);
case ENOTSUP:
if (zfs_version > ZPL_VERSION) {
(void) fprintf(stderr,
gettext("filesystem '%s' (v%d) is not "
"supported by this implementation of "
"ZFS (max v%d).\n"), dataset,
(int)zfs_version, (int)ZPL_VERSION);
} else {
(void) fprintf(stderr,
gettext("filesystem '%s' mount "
"failed for unknown reason.\n"), dataset);
}
return (MOUNT_SYSERR);
#ifdef MS_MANDLOCK
case EPERM:
if (mntflags & MS_MANDLOCK) {
(void) fprintf(stderr, gettext("filesystem "
"'%s' has the 'nbmand=on' property set, "
"this mount\noption may be disabled in "
"your kernel. Use 'zfs set nbmand=off'\n"
"to disable this option and try to "
"mount the filesystem again.\n"), dataset);
return (MOUNT_SYSERR);
}
#endif
- /* FALLTHROUGH */
+ fallthrough;
default:
(void) fprintf(stderr, gettext("filesystem "
"'%s' can not be mounted: %s\n"), dataset,
strerror(errno));
return (MOUNT_USAGE);
}
}
if (!nomtab && mtab_is_writeable()) {
error = mtab_update(dataset, mntpoint, MNTTYPE_ZFS, mtabopt);
if (error)
return (error);
}
return (MOUNT_SUCCESS);
}
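
mount_zfs.c enforces the split described in its comment block: datasets with mountpoint=legacy may only be mounted via mount(8), while datasets with a managed mountpoint must be mounted via 'zfs mount', which supplies the zfsutil option itself; '-o remount' is the one exception allowed for non-legacy datasets. A usage sketch of the two paths, with tank/fs as a hypothetical dataset:

# Legacy mountpoint: administered with mount(8)/umount(8).
zfs set mountpoint=legacy tank/fs
mount -t zfs tank/fs /mnt

# Managed mountpoint: administered with 'zfs mount', which invokes
# mount.zfs with the zfsutil option on the caller's behalf.
zfs set mountpoint=/tank/fs tank/fs
zfs mount tank/fs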
diff --git a/sys/contrib/openzfs/cmd/zdb/zdb.c b/sys/contrib/openzfs/cmd/zdb/zdb.c
index f9c3f9ea6b59..797cac0874fe 100644
--- a/sys/contrib/openzfs/cmd/zdb/zdb.c
+++ b/sys/contrib/openzfs/cmd/zdb/zdb.c
@@ -1,8816 +1,8827 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2019 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017, 2018 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
* Copyright (c) 2020 Datto Inc.
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
* Copyright (c) 2021 Allan Jude
* Copyright (c) 2021 Toomas Soome <tsoome@me.com>
*/
#include <stdio.h>
#include <unistd.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_bookmark.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_send.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/arc_impl.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <sys/dsl_scan.h>
#include <sys/btree.h>
#include <zfs_comutil.h>
#include <sys/zstd/zstd.h>
#include <libnvpair.h>
#include <libzutil.h>
#include "zdb.h"
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
(idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
DMU_OT_ZAP_OTHER : \
(idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
+/* Some platforms require part of inode IDs to be remapped */
+#ifdef __APPLE__
+#define ZDB_MAP_OBJECT_ID(obj) INO_XNUTOZFS(obj, 2)
+#else
+#define ZDB_MAP_OBJECT_ID(obj) (obj)
+#endif
+
static char *
zdb_ot_name(dmu_object_type_t type)
{
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");
}
extern int reference_tracking_enable;
extern int zfs_recover;
extern unsigned long zfs_arc_meta_min, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
extern boolean_t spa_load_verify_dryrun;
extern int zfs_reconstruct_indirect_combinations_max;
extern int zfs_btree_verify_intensity;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
uint64_t *zopt_metaslab = NULL;
static unsigned zopt_metaslab_args = 0;
typedef struct zopt_object_range {
uint64_t zor_obj_start;
uint64_t zor_obj_end;
uint64_t zor_flags;
} zopt_object_range_t;
zopt_object_range_t *zopt_object_ranges = NULL;
static unsigned zopt_object_args = 0;
static int flagbits[256];
#define ZOR_FLAG_PLAIN_FILE 0x0001
#define ZOR_FLAG_DIRECTORY 0x0002
#define ZOR_FLAG_SPACE_MAP 0x0004
#define ZOR_FLAG_ZAP 0x0008
#define ZOR_FLAG_ALL_TYPES -1
#define ZOR_SUPPORTED_FLAGS (ZOR_FLAG_PLAIN_FILE | \
ZOR_FLAG_DIRECTORY | \
ZOR_FLAG_SPACE_MAP | \
ZOR_FLAG_ZAP)
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
uint64_t max_inflight_bytes = 256 * 1024 * 1024; /* 256MB */
static int leaked_objects = 0;
static range_tree_t *mos_refd_objs;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *,
boolean_t);
static void mos_obj_refd(uint64_t);
static void mos_obj_refd_multiple(uint64_t);
static int dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx);
typedef struct sublivelist_verify {
/* FREE's that haven't yet matched to an ALLOC, in one sub-livelist */
zfs_btree_t sv_pair;
/* ALLOC's without a matching FREE, accumulates across sub-livelists */
zfs_btree_t sv_leftover;
} sublivelist_verify_t;
static int
livelist_compare(const void *larg, const void *rarg)
{
const blkptr_t *l = larg;
const blkptr_t *r = rarg;
/* Sort them according to dva[0] */
uint64_t l_dva0_vdev, r_dva0_vdev;
l_dva0_vdev = DVA_GET_VDEV(&l->blk_dva[0]);
r_dva0_vdev = DVA_GET_VDEV(&r->blk_dva[0]);
if (l_dva0_vdev < r_dva0_vdev)
return (-1);
else if (l_dva0_vdev > r_dva0_vdev)
return (+1);
/* if vdevs are equal, sort by offsets. */
uint64_t l_dva0_offset;
uint64_t r_dva0_offset;
l_dva0_offset = DVA_GET_OFFSET(&l->blk_dva[0]);
r_dva0_offset = DVA_GET_OFFSET(&r->blk_dva[0]);
if (l_dva0_offset < r_dva0_offset) {
return (-1);
} else if (l_dva0_offset > r_dva0_offset) {
return (+1);
}
/*
* Since we're storing blkptrs without cancelling FREE/ALLOC pairs,
* it's possible the offsets are equal. In that case, sort by txg
*/
if (l->blk_birth < r->blk_birth) {
return (-1);
} else if (l->blk_birth > r->blk_birth) {
return (+1);
}
return (0);
}
typedef struct sublivelist_verify_block {
dva_t svb_dva;
/*
* We need this to check if the block marked as allocated
* in the livelist was freed (and potentially reallocated)
* in the metaslab spacemaps at a later TXG.
*/
uint64_t svb_allocated_txg;
} sublivelist_verify_block_t;
static void zdb_print_blkptr(const blkptr_t *bp, int flags);
typedef struct sublivelist_verify_block_refcnt {
/* block pointer entry in livelist being verified */
blkptr_t svbr_blk;
/*
* Refcount gets incremented to 1 when we encounter the first
* FREE entry for the svbr block pointer and a node for it
* is created in our ZDB verification/tracking metadata.
*
* As we encounter more FREE entries we increment this counter
* and similarly decrement it whenever we find the respective
* ALLOC entries for this block.
*
* When the refcount gets to 0 it means that all the FREE and
* ALLOC entries of this block have paired up and we no longer
* need to track it in our verification logic (e.g. the node
* containing this struct in our verification data structure
* should be freed).
*
* [refer to sublivelist_verify_blkptr() for the actual code]
*/
uint32_t svbr_refcnt;
} sublivelist_verify_block_refcnt_t;
static int
sublivelist_block_refcnt_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_refcnt_t *l = larg;
const sublivelist_verify_block_refcnt_t *r = rarg;
return (livelist_compare(&l->svbr_blk, &r->svbr_blk));
}
static int
sublivelist_verify_blkptr(void *arg, const blkptr_t *bp, boolean_t free,
dmu_tx_t *tx)
{
ASSERT3P(tx, ==, NULL);
struct sublivelist_verify *sv = arg;
sublivelist_verify_block_refcnt_t current = {
.svbr_blk = *bp,
/*
* Start with 1 in case this is the first free entry.
* This field is not used for our B-Tree comparisons
* anyway.
*/
.svbr_refcnt = 1,
};
zfs_btree_index_t where;
sublivelist_verify_block_refcnt_t *pair =
zfs_btree_find(&sv->sv_pair, &current, &where);
if (free) {
if (pair == NULL) {
/* first free entry for this block pointer */
zfs_btree_add(&sv->sv_pair, &current);
} else {
pair->svbr_refcnt++;
}
} else {
if (pair == NULL) {
/* block that is currently marked as allocated */
for (int i = 0; i < SPA_DVAS_PER_BP; i++) {
if (DVA_IS_EMPTY(&bp->blk_dva[i]))
break;
sublivelist_verify_block_t svb = {
.svb_dva = bp->blk_dva[i],
.svb_allocated_txg = bp->blk_birth
};
if (zfs_btree_find(&sv->sv_leftover, &svb,
&where) == NULL) {
zfs_btree_add_idx(&sv->sv_leftover,
&svb, &where);
}
}
} else {
/* alloc matches a free entry */
pair->svbr_refcnt--;
if (pair->svbr_refcnt == 0) {
/* all allocs and frees have been matched */
zfs_btree_remove_idx(&sv->sv_pair, &where);
}
}
}
return (0);
}
static int
sublivelist_verify_func(void *args, dsl_deadlist_entry_t *dle)
{
int err;
struct sublivelist_verify *sv = args;
zfs_btree_create(&sv->sv_pair, sublivelist_block_refcnt_compare,
sizeof (sublivelist_verify_block_refcnt_t));
err = bpobj_iterate_nofree(&dle->dle_bpobj, sublivelist_verify_blkptr,
sv, NULL);
sublivelist_verify_block_refcnt_t *e;
zfs_btree_index_t *cookie = NULL;
while ((e = zfs_btree_destroy_nodes(&sv->sv_pair, &cookie)) != NULL) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf),
&e->svbr_blk, B_TRUE);
(void) printf("\tERROR: %d unmatched FREE(s): %s\n",
e->svbr_refcnt, blkbuf);
}
zfs_btree_destroy(&sv->sv_pair);
return (err);
}
static int
livelist_block_compare(const void *larg, const void *rarg)
{
const sublivelist_verify_block_t *l = larg;
const sublivelist_verify_block_t *r = rarg;
if (DVA_GET_VDEV(&l->svb_dva) < DVA_GET_VDEV(&r->svb_dva))
return (-1);
else if (DVA_GET_VDEV(&l->svb_dva) > DVA_GET_VDEV(&r->svb_dva))
return (+1);
if (DVA_GET_OFFSET(&l->svb_dva) < DVA_GET_OFFSET(&r->svb_dva))
return (-1);
else if (DVA_GET_OFFSET(&l->svb_dva) > DVA_GET_OFFSET(&r->svb_dva))
return (+1);
if (DVA_GET_ASIZE(&l->svb_dva) < DVA_GET_ASIZE(&r->svb_dva))
return (-1);
else if (DVA_GET_ASIZE(&l->svb_dva) > DVA_GET_ASIZE(&r->svb_dva))
return (+1);
return (0);
}
/*
* Check for errors in a livelist while tracking all unfreed ALLOCs in the
* sublivelist_verify_t: sv->sv_leftover
*/
static void
livelist_verify(dsl_deadlist_t *dl, void *arg)
{
sublivelist_verify_t *sv = arg;
dsl_deadlist_iterate(dl, sublivelist_verify_func, sv);
}
/*
* Check for errors in the livelist entry and discard the intermediary
* data structures
*/
/* ARGSUSED */
static int
sublivelist_verify_lightweight(void *args, dsl_deadlist_entry_t *dle)
{
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
sizeof (sublivelist_verify_block_t));
int err = sublivelist_verify_func(&sv, dle);
zfs_btree_clear(&sv.sv_leftover);
zfs_btree_destroy(&sv.sv_leftover);
return (err);
}
typedef struct metaslab_verify {
/*
* Tree containing all the leftover ALLOCs from the livelists
* that are part of this metaslab.
*/
zfs_btree_t mv_livelist_allocs;
/*
* Metaslab information.
*/
uint64_t mv_vdid;
uint64_t mv_msid;
uint64_t mv_start;
uint64_t mv_end;
/*
* What's currently allocated for this metaslab.
*/
range_tree_t *mv_allocated;
} metaslab_verify_t;
typedef void ll_iter_t(dsl_deadlist_t *ll, void *arg);
typedef int (*zdb_log_sm_cb_t)(spa_t *spa, space_map_entry_t *sme, uint64_t txg,
void *arg);
typedef struct unflushed_iter_cb_arg {
spa_t *uic_spa;
uint64_t uic_txg;
void *uic_arg;
zdb_log_sm_cb_t uic_cb;
} unflushed_iter_cb_arg_t;
static int
iterate_through_spacemap_logs_cb(space_map_entry_t *sme, void *arg)
{
unflushed_iter_cb_arg_t *uic = arg;
return (uic->uic_cb(uic->uic_spa, sme, uic->uic_txg, uic->uic_arg));
}
static void
iterate_through_spacemap_logs(spa_t *spa, zdb_log_sm_cb_t cb, void *arg)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
unflushed_iter_cb_arg_t uic = {
.uic_spa = spa,
.uic_txg = sls->sls_txg,
.uic_arg = arg,
.uic_cb = cb
};
VERIFY0(space_map_iterate(sm, space_map_length(sm),
iterate_through_spacemap_logs_cb, &uic));
space_map_close(sm);
}
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static void
verify_livelist_allocs(metaslab_verify_t *mv, uint64_t txg,
uint64_t offset, uint64_t size)
{
sublivelist_verify_block_t svb;
DVA_SET_VDEV(&svb.svb_dva, mv->mv_vdid);
DVA_SET_OFFSET(&svb.svb_dva, offset);
DVA_SET_ASIZE(&svb.svb_dva, size);
zfs_btree_index_t where;
uint64_t end_offset = offset + size;
/*
* Look for an exact match for spacemap entry in the livelist entries.
* Then, look for other livelist entries that fall within the range
* of the spacemap entry as it may have been condensed
*/
sublivelist_verify_block_t *found =
zfs_btree_find(&mv->mv_livelist_allocs, &svb, &where);
if (found == NULL) {
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where);
}
for (; found != NULL && DVA_GET_VDEV(&found->svb_dva) == mv->mv_vdid &&
DVA_GET_OFFSET(&found->svb_dva) < end_offset;
found = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
if (found->svb_allocated_txg <= txg) {
(void) printf("ERROR: Livelist ALLOC [%llx:%llx] "
"from TXG %llx FREED at TXG %llx\n",
(u_longlong_t)DVA_GET_OFFSET(&found->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&found->svb_dva),
(u_longlong_t)found->svb_allocated_txg,
(u_longlong_t)txg);
}
}
}
static int
metaslab_spacemap_validation_cb(space_map_entry_t *sme, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t txg = sme->sme_txg;
if (sme->sme_type == SM_ALLOC) {
if (range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE ALLOC: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_add(mv->mv_allocated,
offset, size);
}
} else {
if (!range_tree_contains(mv->mv_allocated,
offset, size)) {
(void) printf("ERROR: DOUBLE FREE: "
"%llu [%llx:%llx] "
"%llu:%llu LOG_SM\n",
(u_longlong_t)txg, (u_longlong_t)offset,
(u_longlong_t)size, (u_longlong_t)mv->mv_vdid,
(u_longlong_t)mv->mv_msid);
} else {
range_tree_remove(mv->mv_allocated,
offset, size);
}
}
if (sme->sme_type != SM_ALLOC) {
/*
* If something is freed in the spacemap, verify that
* it is not listed as allocated in the livelist.
*/
verify_livelist_allocs(mv, txg, offset, size);
}
return (0);
}
static int
spacemap_check_sm_log_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
metaslab_verify_t *mv = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
if (vdev_id != mv->mv_vdid)
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
if (ms->ms_id != mv->mv_msid)
return (0);
if (txg < metaslab_unflushed_txg(ms))
return (0);
ASSERT3U(txg, ==, sme->sme_txg);
return (metaslab_spacemap_validation_cb(sme, mv));
}
static void
spacemap_check_sm_log(spa_t *spa, metaslab_verify_t *mv)
{
iterate_through_spacemap_logs(spa, spacemap_check_sm_log_cb, mv);
}
static void
spacemap_check_ms_sm(space_map_t *sm, metaslab_verify_t *mv)
{
if (sm == NULL)
return;
VERIFY0(space_map_iterate(sm, space_map_length(sm),
metaslab_spacemap_validation_cb, mv));
}
static void iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg);
/*
* Transfer blocks from sv_leftover tree to the mv_livelist_allocs if
* they are part of that metaslab (mv_msid).
*/
static void
mv_populate_livelist_allocs(metaslab_verify_t *mv, sublivelist_verify_t *sv)
{
zfs_btree_index_t where;
sublivelist_verify_block_t *svb;
ASSERT3U(zfs_btree_numnodes(&mv->mv_livelist_allocs), ==, 0);
for (svb = zfs_btree_first(&sv->sv_leftover, &where);
svb != NULL;
svb = zfs_btree_next(&sv->sv_leftover, &where, &where)) {
if (DVA_GET_VDEV(&svb->svb_dva) != mv->mv_vdid)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start &&
(DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_start) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
if (DVA_GET_OFFSET(&svb->svb_dva) < mv->mv_start)
continue;
if (DVA_GET_OFFSET(&svb->svb_dva) >= mv->mv_end)
continue;
if ((DVA_GET_OFFSET(&svb->svb_dva) +
DVA_GET_ASIZE(&svb->svb_dva)) > mv->mv_end) {
(void) printf("ERROR: Found block that crosses "
"metaslab boundary: <%llu:%llx:%llx>\n",
(u_longlong_t)DVA_GET_VDEV(&svb->svb_dva),
(u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva));
continue;
}
zfs_btree_add(&mv->mv_livelist_allocs, svb);
}
for (svb = zfs_btree_first(&mv->mv_livelist_allocs, &where);
svb != NULL;
svb = zfs_btree_next(&mv->mv_livelist_allocs, &where, &where)) {
zfs_btree_remove(&sv->sv_leftover, svb);
}
}
/*
* [Livelist Check]
* Iterate through all the sublivelists and:
* - report leftover frees (**)
* - record leftover ALLOCs together with their TXG [see Cross Check]
*
* (**) Note: Double ALLOCs are valid in datasets that have dedup
* enabled. Similarly double FREEs are allowed as well but
* only if they pair up with a corresponding ALLOC entry once
* we are done with our sublivelist iteration.
*
* [Spacemap Check]
* for each metaslab:
* - iterate over spacemap and then the metaslab's entries in the
* spacemap log, then report any double FREEs and ALLOCs (do not
* blow up).
*
* [Cross Check]
* After finishing the Livelist Check phase and while being in the
* Spacemap Check phase, we find all the recorded leftover ALLOCs
* of the livelist check that are part of the metaslab that we are
* currently looking at in the Spacemap Check. We report any entries
* that are marked as ALLOCs in the livelists but have been actually
* freed (and potentially allocated again) after their TXG stamp in
* the spacemaps. Also report any ALLOCs from the livelists that
* belong to indirect vdevs (e.g. their vdev completed removal).
*
* Note that this will miss Log Spacemap entries that cancelled each other
* out before being flushed to the metaslab, so we are not guaranteed
* to match all erroneous ALLOCs.
*/
static void
livelist_metaslab_validate(spa_t *spa)
{
(void) printf("Verifying deleted livelist entries\n");
sublivelist_verify_t sv;
zfs_btree_create(&sv.sv_leftover, livelist_block_compare,
sizeof (sublivelist_verify_block_t));
iterate_deleted_livelists(spa, livelist_verify, &sv);
(void) printf("Verifying metaslab entries\n");
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
for (uint64_t mid = 0; mid < vd->vdev_ms_count; mid++) {
metaslab_t *m = vd->vdev_ms[mid];
(void) fprintf(stderr,
"\rverifying concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)mid,
(longlong_t)vd->vdev_ms_count);
uint64_t shift, start;
range_seg_type_t type =
metaslab_calculate_range_tree_type(vd, m,
&start, &shift);
metaslab_verify_t mv;
mv.mv_allocated = range_tree_create(NULL,
type, NULL, start, shift);
mv.mv_vdid = vd->vdev_id;
mv.mv_msid = m->ms_id;
mv.mv_start = m->ms_start;
mv.mv_end = m->ms_start + m->ms_size;
zfs_btree_create(&mv.mv_livelist_allocs,
livelist_block_compare,
sizeof (sublivelist_verify_block_t));
mv_populate_livelist_allocs(&mv, &sv);
spacemap_check_ms_sm(m->ms_sm, &mv);
spacemap_check_sm_log(spa, &mv);
range_tree_vacate(mv.mv_allocated, NULL, NULL);
range_tree_destroy(mv.mv_allocated);
zfs_btree_clear(&mv.mv_livelist_allocs);
zfs_btree_destroy(&mv.mv_livelist_allocs);
}
}
(void) fprintf(stderr, "\n");
/*
* If there are any segments in the leftover tree after we walked
* through all the metaslabs in the concrete vdevs then this means
* that we have segments in the livelists that belong to indirect
* vdevs and are marked as allocated.
*/
if (zfs_btree_numnodes(&sv.sv_leftover) == 0) {
zfs_btree_destroy(&sv.sv_leftover);
return;
}
(void) printf("ERROR: Found livelist blocks marked as allocated "
"for indirect vdevs:\n");
zfs_btree_index_t *where = NULL;
sublivelist_verify_block_t *svb;
while ((svb = zfs_btree_destroy_nodes(&sv.sv_leftover, &where)) !=
NULL) {
int vdev_id = DVA_GET_VDEV(&svb->svb_dva);
ASSERT3U(vdev_id, <, rvd->vdev_children);
vdev_t *vd = rvd->vdev_child[vdev_id];
ASSERT(!vdev_is_concrete(vd));
(void) printf("<%d:%llx:%llx> TXG %llx\n",
vdev_id, (u_longlong_t)DVA_GET_OFFSET(&svb->svb_dva),
(u_longlong_t)DVA_GET_ASIZE(&svb->svb_dva),
(u_longlong_t)svb->svb_allocated_txg);
}
(void) printf("\n");
zfs_btree_destroy(&sv.sv_leftover);
}
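
livelist_metaslab_validate() drives the three-phase check laid out in the comment above; within zdb it is reached through the -y option (see usage() below). An invocation sketch, with tank as a hypothetical pool name:

# Validate deleted livelists against metaslab spacemaps and report any
# double FREEs, double ALLOCs, or leftover indirect-vdev allocations.
zdb -y tank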
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhikLMPsvXy] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t[<poolname>[/<dataset | objset id>] [<object | range> ...]\n"
"\t%s [-v] <bookmark>\n"
"\t%s -C [-A] [-U <cache>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O <dataset> <path>\n"
"\t%s -r <dataset> <path> <destination>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
cmdname, cmdname, cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers or object number "
"ranges are specified, only those\n"
" objects or ranges are dumped.\n\n");
(void) fprintf(stderr,
" Object ranges take the form <start>:<end>[:<flags>]\n"
" start Starting object number\n"
" end Ending object number, or -1 for no upper bound\n"
" flags Optional flags to select object types:\n"
" A All objects (this is the default)\n"
" d ZFS directories\n"
" f ZFS files \n"
" m SPA space maps\n"
" z ZAPs\n"
" - Negate effect of next flag\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b block statistics\n");
(void) fprintf(stderr, " -c checksum all metadata (twice for "
"all data) blocks\n");
(void) fprintf(stderr, " -C config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d dataset(s)\n");
(void) fprintf(stderr, " -D dedup statistics\n");
(void) fprintf(stderr, " -E decode and display block from an "
"embedded block pointer\n");
(void) fprintf(stderr, " -h pool history\n");
(void) fprintf(stderr, " -i intent logs\n");
(void) fprintf(stderr, " -l read label contents\n");
(void) fprintf(stderr, " -k examine the checkpointed state "
"of the pool\n");
(void) fprintf(stderr, " -L disable leak tracking (do not "
"load spacemaps)\n");
(void) fprintf(stderr, " -m metaslabs\n");
(void) fprintf(stderr, " -M metaslab groups\n");
(void) fprintf(stderr, " -O perform object lookups by path\n");
(void) fprintf(stderr, " -r copy an object by path to file\n");
(void) fprintf(stderr, " -R read and display block from a "
"device\n");
(void) fprintf(stderr, " -s report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v verbose (applies to all "
"others)\n");
(void) fprintf(stderr, " -y perform livelist and metaslab "
"validation on any livelists being deleted\n\n");
(void) fprintf(stderr, " Below options are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A ignore assertions (-A), enable "
"panic recovery (-AA) or both (-AAA)\n");
(void) fprintf(stderr, " -e pool is exported/destroyed/"
"has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F attempt automatic rewind within "
"safe range of transaction groups\n");
(void) fprintf(stderr, " -G dump zfs_dbgmsg buffer before "
"exiting\n");
(void) fprintf(stderr, " -I <number of inflight I/Os> -- "
"specify the maximum number of\n "
"checksumming I/Os [default is 200]\n");
(void) fprintf(stderr, " -o <variable>=<value> set global "
"variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p <path> -- use one or more with "
"-e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P print numbers in parseable form\n");
(void) fprintf(stderr, " -q don't print label contents\n");
(void) fprintf(stderr, " -t <txg> -- highest txg to use when "
"searching for uberblocks\n");
(void) fprintf(stderr, " -u uberblock\n");
(void) fprintf(stderr, " -U <cachefile_path> -- use alternate "
"cachefile\n");
(void) fprintf(stderr, " -V do verbatim import\n");
(void) fprintf(stderr, " -x <dumpdir> -- "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X attempt extreme rewind (does not "
"work with dataset)\n");
(void) fprintf(stderr, " -Y attempt all reconstruction "
"combinations for split blocks\n");
(void) fprintf(stderr, " -Z show ZSTD headers \n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
exit(1);
}
static void
dump_debug_buffer(void)
{
if (dump_opt['G']) {
(void) printf("\n");
(void) fflush(stdout);
zfs_dbgmsg_print("zdb");
}
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
exit(1);
}
/* ARGSUSED */
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
/* ARGSUSED */
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, sizeof (buf));
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] > max)
max = histo[i];
if (histo[i] > 0 && i > maxidx)
maxidx = i;
if (histo[i] > 0 && i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
/*ARGSUSED*/
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
/*ARGSUSED*/
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
uint64_t *arr;
uint64_t oursize;
if (dump_opt['d'] < 6)
return;
if (data == NULL) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(os, object, &doi));
size = doi.doi_max_offset;
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
arr = kmem_alloc(oursize, KM_SLEEP);
int err = dmu_read(os, object, 0, oursize, arr, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(arr, oursize);
return;
}
} else {
/*
* Even though the allocation is already done in this code path,
* we still cap the size to prevent excessive printing.
*/
oursize = MIN(size, 1 << 20);
arr = data;
}
if (size == 0) {
(void) printf("\t\t[]\n");
return;
}
(void) printf("\t\t[%0llx", (u_longlong_t)arr[0]);
for (size_t i = 1; i * sizeof (uint64_t) < oursize; i++) {
if (i % 4 != 0)
(void) printf(", %0llx", (u_longlong_t)arr[i]);
else
(void) printf(",\n\t\t%0llx", (u_longlong_t)arr[i]);
}
if (oursize != size)
(void) printf(", ... ");
(void) printf("]\n");
if (data == NULL)
kmem_free(arr, oursize);
}
/*ARGSUSED*/
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
if (strcmp(attr.za_name,
DSL_CRYPTO_KEY_MASTER_KEY) == 0 ||
strcmp(attr.za_name,
DSL_CRYPTO_KEY_HMAC_KEY) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_IV) == 0 ||
strcmp(attr.za_name, DSL_CRYPTO_KEY_MAC) == 0 ||
strcmp(attr.za_name, DMU_POOL_CHECKSUM_SALT) == 0) {
uint8_t *u8 = prop;
for (i = 0; i < attr.za_num_integers; i++) {
(void) printf("%02x", u8[i]);
}
} else {
(void) printf("%s", (char *)prop);
}
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop, attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= BPOBJ_SIZE_V2) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tnum_freed = %llu\n",
(u_longlong_t)bpop->bpo_num_freed);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp,
BP_GET_FREE(&bp));
(void) printf("\t%s\n", blkbuf);
}
}
/* ARGSUSED */
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
/*ARGSUSED*/
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
/*ARGSUSED*/
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attr.za_first_integer,
(int)ATTR_LENGTH(attr.za_first_integer),
(int)ATTR_BSWAP(attr.za_first_integer),
(int)ATTR_NUM(attr.za_first_integer));
}
zap_cursor_fini(&zc);
}
/*ARGSUSED*/
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attr.za_integer_length == 2);
layout_attrs = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attr.za_name,
attr.za_integer_length,
attr.za_num_integers, layout_attrs) == 0);
for (i = 0; i != attr.za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
/*ARGSUSED*/
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
}
zap_cursor_fini(&zc);
}
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd) {
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_obsolete_refcount(vdev_t *vd)
{
uint64_t obsolete_sm_object;
int refcount = 0;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (vd->vdev_top == vd && obsolete_sm_object != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
obsolete_sm_object, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
refcount++;
}
} else {
ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
ASSERT3U(obsolete_sm_object, ==, 0);
}
for (unsigned c = 0; c < vd->vdev_children; c++) {
refcount += get_obsolete_refcount(vd->vdev_child[c]);
}
return (refcount);
}
static int
get_prev_obsolete_spacemap_refcount(spa_t *spa)
{
uint64_t prev_obj =
spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
if (prev_obj != 0) {
dmu_object_info_t doi;
VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
return (1);
}
}
return (0);
}
static int
get_checkpoint_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_top == vd && vd->vdev_top_zap != 0 &&
zap_contains(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) == 0)
refcount++;
for (uint64_t c = 0; c < vd->vdev_children; c++)
refcount += get_checkpoint_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_log_spacemap_refcount(spa_t *spa)
{
return (avl_numnodes(&spa->spa_sm_logs_by_txg));
}
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
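/*
 * The spacemap_histogram feature refcount should equal the number of
 * space maps using the new, histogram-capable space_map_phys_t: DTL and
 * metaslab space maps, obsolete and previous-obsolete maps, checkpoint
 * maps, and log space maps.
 */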
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
actual_refcount += get_checkpoint_refcount(spa->spa_root_vdev);
actual_refcount += get_log_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
"INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
(void) printf("space map object %llu:\n",
(longlong_t)sm->sm_object);
(void) printf(" smp_length = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_length);
(void) printf(" smp_alloc = 0x%llx\n",
(longlong_t)sm->sm_phys->smp_alloc);
if (dump_opt['d'] < 6 && dump_opt['m'] < 4)
return;
/*
* Print out the freelist entries in both encoded and decoded form.
*/
uint8_t mapshift = sm->sm_shift;
int64_t alloc = 0;
uint64_t word, entry_id = 0;
for (uint64_t offset = 0; offset < space_map_length(sm);
offset += sizeof (word)) {
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (word), &word, DMU_READ_PREFETCH));
if (sm_entry_is_debug(word)) {
uint64_t de_txg = SM_DEBUG_TXG_DECODE(word);
uint64_t de_sync_pass = SM_DEBUG_SYNCPASS_DECODE(word);
if (de_txg == 0) {
(void) printf(
"\t [%6llu] PADDING\n",
(u_longlong_t)entry_id);
} else {
(void) printf(
"\t [%6llu] %s: txg %llu pass %llu\n",
(u_longlong_t)entry_id,
ddata[SM_DEBUG_ACTION_DECODE(word)],
(u_longlong_t)de_txg,
(u_longlong_t)de_sync_pass);
}
entry_id++;
continue;
}
uint8_t words;
char entry_type;
uint64_t entry_off, entry_run, entry_vdev = SM_NO_VDEVID;
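/*
 * Non-debug entries use one of two encodings: a single-word entry, or a
 * two-word entry that can express larger offsets and run lengths and
 * also carries a vdev id.
 */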
if (sm_entry_is_single_word(word)) {
entry_type = (SM_TYPE_DECODE(word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM_OFFSET_DECODE(word) << mapshift) +
sm->sm_start;
entry_run = SM_RUN_DECODE(word) << mapshift;
words = 1;
} else {
/* it is a two-word entry so we read another word */
ASSERT(sm_entry_is_double_word(word));
uint64_t extra_word;
offset += sizeof (extra_word);
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (extra_word), &extra_word,
DMU_READ_PREFETCH));
ASSERT3U(offset, <=, space_map_length(sm));
entry_run = SM2_RUN_DECODE(word) << mapshift;
entry_vdev = SM2_VDEV_DECODE(word);
entry_type = (SM2_TYPE_DECODE(extra_word) == SM_ALLOC) ?
'A' : 'F';
entry_off = (SM2_OFFSET_DECODE(extra_word) <<
mapshift) + sm->sm_start;
words = 2;
}
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx vdev: %06llu words: %u\n",
(u_longlong_t)entry_id,
entry_type, (u_longlong_t)entry_off,
(u_longlong_t)(entry_off + entry_run),
(u_longlong_t)entry_run,
(u_longlong_t)entry_vdev, words);
if (entry_type == 'A')
alloc += entry_run;
else
alloc -= entry_run;
entry_id++;
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%lld) INCONSISTENT "
"with space map summary (%lld)\n",
(longlong_t)space_map_allocated(sm), (longlong_t)alloc);
}
}
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_allocatable;
zfs_btree_t *t = &msp->ms_allocatable_by_size;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);
zdb_nicenum(metaslab_largest_allocatable(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", zfs_btree_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
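/*
 * At higher -m verbosity (and without -L), load the metaslab so the
 * in-memory allocatable range tree can be verified and its statistics
 * and histogram printed.
 */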
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_allocatable);
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
* The space map histogram represents free space in chunks
* of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
*/
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (vd->vdev_ops == &vdev_draid_ops)
ASSERT3U(msp->ms_size, <=, 1ULL << vd->vdev_ms_shift);
else
ASSERT3U(msp->ms_size, ==, 1ULL << vd->vdev_ms_shift);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
if (spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
(void) printf("\tFlush data:\n\tunflushed txg=%llu\n\n",
(u_longlong_t)metaslab_unflushed_txg(msp));
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
vdev_alloc_bias_t alloc_bias = vd->vdev_alloc_bias;
const char *bias_str = "";
if (alloc_bias == VDEV_BIAS_LOG || vd->vdev_islog) {
bias_str = VDEV_ALLOC_BIAS_LOG;
} else if (alloc_bias == VDEV_BIAS_SPECIAL) {
bias_str = VDEV_ALLOC_BIAS_SPECIAL;
} else if (alloc_bias == VDEV_BIAS_DEDUP) {
bias_str = VDEV_ALLOC_BIAS_DEDUP;
}
uint64_t ms_flush_data_obj = 0;
if (vd->vdev_top_zap != 0) {
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (uint64_t), 1, &ms_flush_data_obj);
if (error != ENOENT) {
ASSERT0(error);
}
}
(void) printf("\tvdev %10llu %s",
(u_longlong_t)vd->vdev_id, bias_str);
if (ms_flush_data_obj != 0) {
(void) printf(" ms_unflushed_phys object %llu",
(u_longlong_t)ms_flush_data_obj);
}
(void) printf("\n\t%-10s%5llu %-19s %-15s %-12s\n",
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %12s\n",
"---------------", "-------------------",
"---------------", "------------");
}
static void
dump_metaslab_groups(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || mg->mg_class != mc)
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
print_vdev_indirect(vdev_t *vd)
{
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib = vd->vdev_indirect_births;
if (vim == NULL) {
ASSERT3P(vib, ==, NULL);
return;
}
ASSERT3U(vdev_indirect_mapping_object(vim), ==,
vic->vic_mapping_object);
ASSERT3U(vdev_indirect_births_object(vib), ==,
vic->vic_births_object);
(void) printf("indirect births obj %llu:\n",
(longlong_t)vic->vic_births_object);
(void) printf(" vib_count = %llu\n",
(longlong_t)vdev_indirect_births_count(vib));
for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
vdev_indirect_birth_entry_phys_t *cur_vibe =
&vib->vib_entries[i];
(void) printf("\toffset %llx -> txg %llu\n",
(longlong_t)cur_vibe->vibe_offset,
(longlong_t)cur_vibe->vibe_phys_birth_txg);
}
(void) printf("\n");
(void) printf("indirect mapping obj %llu:\n",
(longlong_t)vic->vic_mapping_object);
(void) printf(" vim_max_offset = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_max_offset(vim));
(void) printf(" vim_bytes_mapped = 0x%llx\n",
(longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
(void) printf(" vim_count = %llu\n",
(longlong_t)vdev_indirect_mapping_num_entries(vim));
if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
return;
uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
(void) printf("\t<%llx:%llx:%llx> -> "
"<%llx:%llx:%llx> (%x obsolete)\n",
(longlong_t)vd->vdev_id,
(longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
(longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
(longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
counts[i]);
}
(void) printf("\n");
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
objset_t *mos = vd->vdev_spa->spa_meta_objset;
(void) printf("obsolete space map object %llu:\n",
(u_longlong_t)obsolete_sm_object);
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
obsolete_sm_object);
dump_spacemap(mos, vd->vdev_obsolete_sm);
(void) printf("\n");
}
}
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_metaslab_args > 0) {
c = zopt_metaslab[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_metaslab_args > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_metaslab_args; m++) {
if (zopt_metaslab[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_metaslab[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_metaslab[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
print_vdev_indirect(vd);
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_log_spacemaps(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
(void) printf("\nLog Space Maps in Pool:\n");
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls)) {
space_map_t *sm = NULL;
VERIFY0(space_map_open(&sm, spa_meta_objset(spa),
sls->sls_sm_obj, 0, UINT64_MAX, SPA_MINBLOCKSHIFT));
(void) printf("Log Spacemap object %llu txg %llu\n",
(u_longlong_t)sls->sls_sm_obj, (u_longlong_t)sls->sls_txg);
dump_spacemap(spa->spa_meta_objset, sm);
space_map_close(sm);
}
(void) printf("\n");
}
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu %s %s\n",
(u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
types[p], blkbuf);
}
}
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
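/*
 * Ratios compare the referenced (logical) sizes against what is actually
 * allocated: "dedup" is how many times each stored byte is referenced,
 * "compress" is the logical-to-physical ratio of the referenced data,
 * and "copies" reflects additional allocated space per block.
 */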
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
char name[DDT_NAMELEN];
ddt_entry_t dde;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
ddt_object_name(ddt, type, class, name);
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
dump_dde(ddt, &dde, walk);
ASSERT3U(error, ==, ENOENT);
(void) printf("\n");
}
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
}
}
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
range_tree_walk(rt, dump_dtl_seg, prefix);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
char tbuf[30];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
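/*
 * Read the history log in SPA_OLD_MAXBLOCKSIZE chunks.
 * zpool_history_unpack() appends decoded records to 'events' and reports
 * how many trailing bytes belong to a partially read record; back 'off'
 * up by that amount so the next read starts on a record boundary.
 */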
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
boolean_t printed = B_FALSE;
if (nvlist_exists(events[i], ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
} else {
tbuf[0] = '\0';
}
if (nvlist_exists(events[i], ZPOOL_HIST_CMD)) {
(void) printf("%s %s\n", tbuf,
fnvlist_lookup_string(events[i], ZPOOL_HIST_CMD));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_EVENT)) {
uint64_t ievent;
ievent = fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) printf(" %s [internal %s txg:%ju] %s\n",
tbuf,
zfs_history_event_names[ievent],
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_INT_NAME)) {
(void) printf("%s [txg:%ju] %s", tbuf,
fnvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG),
fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_NAME));
if (nvlist_exists(events[i], ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(events[i],
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(
events[i],
ZPOOL_HIST_DSID));
}
(void) printf(" %s\n", fnvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(events[i], ZPOOL_HIST_IOCTL)) {
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(events[i],
ZPOOL_HIST_IOCTL));
if (nvlist_exists(events[i], ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(events[i],
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(events[i], ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(events[i],
ZPOOL_HIST_ERRNO));
}
} else {
goto next;
}
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
/*ARGSUSED*/
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
}
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
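/*
 * Each indirect level fans out 2^(dn_indblkshift - SPA_BLKPTRSHIFT)
 * block pointers, so scale blkid up to a level-0 block id, then convert
 * to a byte offset using the dnode's data block size (dn_datablkszsec
 * is in 512-byte sectors).
 */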
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
static void
snprintf_zstd_header(spa_t *spa, char *blkbuf, size_t buflen,
const blkptr_t *bp)
{
abd_t *pabd;
void *buf;
zio_t *zio;
zfs_zstdhdr_t zstd_hdr;
int error;
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_ZSTD)
return;
if (BP_IS_HOLE(bp))
return;
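/*
 * For embedded block pointers the compressed payload (and thus the zstd
 * header) is stored in the bp itself; otherwise issue a raw read (no
 * decompression) to fetch the on-disk header.
 */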
if (BP_IS_EMBEDDED(bp)) {
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
decode_embedded_bp_compressed(bp, buf);
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
free(buf);
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:EMBEDDED",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
return;
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
zio = zio_root(spa, NULL, NULL, 0);
/* Decrypt but don't decompress so we can read the compression header */
zio_nowait(zio_read(zio, spa, bp, pabd, BP_GET_PSIZE(bp), NULL, NULL,
ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW_COMPRESS,
NULL));
error = zio_wait(zio);
if (error) {
(void) fprintf(stderr, "read failed: %d\n", error);
return;
}
buf = abd_borrow_buf_copy(pabd, BP_GET_LSIZE(bp));
memcpy(&zstd_hdr, buf, sizeof (zstd_hdr));
zstd_hdr.c_len = BE_32(zstd_hdr.c_len);
zstd_hdr.raw_version_level = BE_32(zstd_hdr.raw_version_level);
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
" ZSTD:size=%u:version=%u:level=%u:NORMAL",
zstd_hdr.c_len, zfs_get_hdrversion(&zstd_hdr),
zfs_get_hdrlevel(&zstd_hdr));
abd_return_buf_copy(pabd, buf, BP_GET_LSIZE(bp));
}
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp,
boolean_t bp_freed)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
if (bp_freed) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
}
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)bp->blk_birth);
return;
}
blkbuf[0] = '\0';
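/*
 * Compact form: one vdev:offset:asize triple per DVA, followed by
 * LSIZE/PSIZE, fill count, birth txgs, and the checksum words
 * (holes print only LSIZE and birth).
 */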
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)bp->blk_birth,
(u_longlong_t)BP_PHYSICAL_BIRTH(bp));
if (bp_freed)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " %s", "FREE");
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), " cksum=%llx:%llx:%llx:%llx",
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
}
}
static void
print_indirect(spa_t *spa, blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
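/* Print one column per indirection level, marking this bp's level. */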
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, B_FALSE);
if (dump_opt['Z'] && BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD)
snprintf_zstd_header(spa, blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (bp->blk_birth == 0)
return (0);
print_indirect(spa, bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
ASSERT(!BP_IS_REDACTED(bp));
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
/*ARGSUSED*/
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
int j;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
/*ARGSUSED*/
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ);
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
(void) printf("\t\tclones = %llu\n",
(u_longlong_t)dd->dd_clones);
}
/*ARGSUSED*/
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
CTASSERT(sizeof (used) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ);
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
/* ARGSUSED */
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
char blkbuf[BP_SPRINTF_LEN];
if (bp->blk_birth != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
/* ARGSUSED */
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed, dmu_tx_t *tx)
{
char blkbuf[BP_SPRINTF_LEN];
ASSERT(bp->blk_birth != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp, bp_freed);
(void) printf("\t%s\n", blkbuf);
return (0);
}
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu freed, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
} else {
(void) printf(" %*s: object %llu, %llu local "
"blkptrs, %llu subobjs in object %llu, "
"%s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
}
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
if (bpo->bpo_havefreed) {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%llu freed, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_freed,
bytes);
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, "
"%s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
static int
dump_bookmark(dsl_pool_t *dp, char *name, boolean_t print_redact,
boolean_t print_list)
{
int err = 0;
zfs_bookmark_phys_t prop;
objset_t *mos = dp->dp_spa->spa_meta_objset;
err = dsl_bookmark_lookup(dp, name, NULL, &prop);
if (err != 0) {
return (err);
}
(void) printf("\t#%s: ", strchr(name, '#') + 1);
(void) printf("{guid: %llx creation_txg: %llu creation_time: "
"%llu redaction_obj: %llu}\n", (u_longlong_t)prop.zbm_guid,
(u_longlong_t)prop.zbm_creation_txg,
(u_longlong_t)prop.zbm_creation_time,
(u_longlong_t)prop.zbm_redaction_obj);
IMPLY(print_list, print_redact);
if (!print_redact || prop.zbm_redaction_obj == 0)
return (0);
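/*
 * A redaction bookmark references a redaction list object in the MOS;
 * each entry (redact_block_phys_t) records an object/blkid range that
 * was redacted from the send stream.
 */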
redaction_list_t *rl;
VERIFY0(dsl_redaction_list_hold_obj(dp,
prop.zbm_redaction_obj, FTAG, &rl));
redaction_list_phys_t *rlp = rl->rl_phys;
(void) printf("\tRedacted:\n\t\tProgress: ");
if (rlp->rlp_last_object != UINT64_MAX ||
rlp->rlp_last_blkid != UINT64_MAX) {
(void) printf("%llu %llu (incomplete)\n",
(u_longlong_t)rlp->rlp_last_object,
(u_longlong_t)rlp->rlp_last_blkid);
} else {
(void) printf("complete\n");
}
(void) printf("\t\tSnapshots: [");
for (unsigned int i = 0; i < rlp->rlp_num_snaps; i++) {
if (i > 0)
(void) printf(", ");
(void) printf("%0llu",
(u_longlong_t)rlp->rlp_snaps[i]);
}
(void) printf("]\n\t\tLength: %llu\n",
(u_longlong_t)rlp->rlp_num_entries);
if (!print_list) {
dsl_redaction_list_rele(rl, FTAG);
return (0);
}
if (rlp->rlp_num_entries == 0) {
dsl_redaction_list_rele(rl, FTAG);
(void) printf("\t\tRedaction List: []\n\n");
return (0);
}
redact_block_phys_t *rbp_buf;
uint64_t size;
dmu_object_info_t doi;
VERIFY0(dmu_object_info(mos, prop.zbm_redaction_obj, &doi));
size = doi.doi_max_offset;
rbp_buf = kmem_alloc(size, KM_SLEEP);
err = dmu_read(mos, prop.zbm_redaction_obj, 0, size,
rbp_buf, 0);
if (err != 0) {
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
return (err);
}
(void) printf("\t\tRedaction List: [{object: %llx, offset: "
"%llx, blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[0].rbp_object,
(u_longlong_t)rbp_buf[0].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[0])),
(u_longlong_t)redact_block_get_count(&rbp_buf[0]));
for (size_t i = 1; i < rlp->rlp_num_entries; i++) {
(void) printf(",\n\t\t{object: %llx, offset: %llx, "
"blksz: %x, count: %llx}",
(u_longlong_t)rbp_buf[i].rbp_object,
(u_longlong_t)rbp_buf[i].rbp_blkid,
(uint_t)(redact_block_get_size(&rbp_buf[i])),
(u_longlong_t)redact_block_get_count(&rbp_buf[i]));
}
dsl_redaction_list_rele(rl, FTAG);
kmem_free(rbp_buf, size);
(void) printf("]\n\n");
return (0);
}
static void
dump_bookmarks(objset_t *os, int verbosity)
{
zap_cursor_t zc;
zap_attribute_t attr;
dsl_dataset_t *ds = dmu_objset_ds(os);
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
objset_t *mos = os->os_spa->spa_meta_objset;
if (verbosity < 4)
return;
dsl_pool_config_enter(dp, FTAG);
for (zap_cursor_init(&zc, mos, ds->ds_bookmarks_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
char osname[ZFS_MAX_DATASET_NAME_LEN];
char buf[ZFS_MAX_DATASET_NAME_LEN];
dmu_objset_name(os, osname);
VERIFY3S(0, <=, snprintf(buf, sizeof (buf), "%s#%s", osname,
attr.za_name));
(void) dump_bookmark(dp, buf, verbosity >= 5, verbosity >= 6);
}
zap_cursor_fini(&zc);
dsl_pool_config_exit(dp, FTAG);
}
static void
bpobj_count_refd(bpobj_t *bpo)
{
mos_obj_refd(bpo->bpo_object);
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
mos_obj_refd(bpo->bpo_phys->bpo_subobjs);
for (uint64_t i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
bpobj_count_refd(&subbpo);
bpobj_close(&subbpo);
}
}
}
static int
dsl_deadlist_entry_count_refd(void *arg, dsl_deadlist_entry_t *dle)
{
spa_t *spa = arg;
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dle->dle_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dle->dle_bpobj);
return (0);
}
static int
dsl_deadlist_entry_dump(void *arg, dsl_deadlist_entry_t *dle)
{
ASSERT(arg == NULL);
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
return (0);
}
static void
dump_blkptr_list(dsl_deadlist_t *dl, char *name)
{
char bytes[32];
char comp[32];
char uncomp[32];
char entries[32];
spa_t *spa = dmu_objset_spa(dl->dl_os);
uint64_t empty_bpobj = spa->spa_dsl_pool->dp_empty_bpobj;
if (dl->dl_oldfmt) {
if (dl->dl_bpobj.bpo_object != empty_bpobj)
bpobj_count_refd(&dl->dl_bpobj);
} else {
mos_obj_refd(dl->dl_object);
dsl_deadlist_iterate(dl, dsl_deadlist_entry_count_refd, spa);
}
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (entries) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
zdb_nicenum(avl_numnodes(&dl->dl_tree), entries, sizeof (entries));
(void) printf("\n %s: %s (%s/%s comp), %s entries\n",
name, bytes, comp, uncomp, entries);
if (dump_opt['d'] < 4)
return;
(void) printf("\n");
dsl_deadlist_iterate(dl, dsl_deadlist_entry_dump, NULL);
}
static int
verify_dd_livelist(objset_t *os)
{
uint64_t ll_used, used, ll_comp, comp, ll_uncomp, uncomp;
dsl_pool_t *dp = spa_get_dsl(os->os_spa);
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
ASSERT(!dmu_objset_is_snapshot(os));
if (!dsl_deadlist_is_open(&dd->dd_livelist))
return (0);
/* Iterate through the livelist to check for duplicates */
dsl_deadlist_iterate(&dd->dd_livelist, sublivelist_verify_lightweight,
NULL);
dsl_pool_config_enter(dp, FTAG);
dsl_deadlist_space(&dd->dd_livelist, &ll_used,
&ll_comp, &ll_uncomp);
dsl_dataset_t *origin_ds;
ASSERT(dsl_pool_config_held(dp));
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin_ds));
VERIFY0(dsl_dataset_space_written(origin_ds, os->os_dsl_dataset,
&used, &comp, &uncomp));
dsl_dataset_rele(origin_ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
/*
* It's possible that the dataset's uncomp space is larger than the
* livelist's because livelists do not track embedded block pointers
*/
if (used != ll_used || comp != ll_comp || uncomp < ll_uncomp) {
char nice_used[32], nice_comp[32], nice_uncomp[32];
(void) printf("Discrepancy in space accounting:\n");
zdb_nicenum(used, nice_used, sizeof (nice_used));
zdb_nicenum(comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("dir: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
zdb_nicenum(ll_used, nice_used, sizeof (nice_used));
zdb_nicenum(ll_comp, nice_comp, sizeof (nice_comp));
zdb_nicenum(ll_uncomp, nice_uncomp, sizeof (nice_uncomp));
(void) printf("livelist: used %s, comp %s, uncomp %s\n",
nice_used, nice_comp, nice_uncomp);
return (1);
}
return (0);
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
/*
* We can't own an objset if it's redacted. Therefore, we do this
* dance: hold the objset, then acquire a long hold on its dataset, then
* release the pool (which is held as part of holding the objset).
*/
err = dmu_objset_hold(path, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to hold dataset '%s': %s\n",
path, strerror(err));
return (err);
}
dsl_dataset_long_hold(dmu_objset_ds(*osp), tag);
dsl_pool_rele(dmu_objset_pool(*osp), tag);
if (dmu_objset_type(*osp) == DMU_OST_ZFS && !(*osp)->os_encrypted) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dsl_dataset_long_rele(dmu_objset_ds(*osp), tag);
dsl_dataset_rele(dmu_objset_ds(*osp), tag);
*osp = NULL;
}
}
sa_os = *osp;
return (0);
}
static void
close_objset(objset_t *os, void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dsl_dataset_long_rele(dmu_objset_ds(os), tag);
dsl_dataset_rele(dmu_objset_ds(os), tag);
sa_attr_table = NULL;
sa_os = NULL;
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
* print uid or gid information.
* For normal POSIX id just the id is printed in decimal format.
* For CIFS files with FUID the fuid is printed in hex followed by
* the domain-rid string.
*/
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
char *domain;
domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
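/*
 * The DXATTR SA attribute is a packed nvlist mapping xattr names to byte
 * arrays; unpack it and print each value, escaping non-printable bytes
 * as octal.
 */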
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (isprint(value[idx]))
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
static void
dump_znode_symlink(sa_handle_t *hdl)
{
int sa_symlink_size = 0;
char linktarget[MAXPATHLEN];
linktarget[0] = '\0';
int error;
error = sa_size(hdl, sa_attr_table[ZPL_SYMLINK], &sa_symlink_size);
if (error || sa_symlink_size == 0) {
return;
}
if (sa_lookup(hdl, sa_attr_table[ZPL_SYMLINK],
&linktarget, sa_symlink_size) == 0)
(void) printf("\ttarget %s\n", linktarget);
}
/*ARGSUSED*/
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error == ESTALE) {
(void) snprintf(path, sizeof (path), "on delete queue");
} else if (error != 0) {
leaked_objects++;
(void) snprintf(path, sizeof (path),
"path not found, possibly leaked");
}
(void) printf("\tpath %s\n", path);
}
if (S_ISLNK(mode))
dump_znode_symlink(hdl);
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
/*ARGSUSED*/
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
static boolean_t
match_object_type(dmu_object_type_t obj_type, uint64_t flags)
{
boolean_t match = B_TRUE;
switch (obj_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (!(flags & ZOR_FLAG_DIRECTORY))
match = B_FALSE;
break;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (!(flags & ZOR_FLAG_PLAIN_FILE))
match = B_FALSE;
break;
case DMU_OT_SPACE_MAP:
if (!(flags & ZOR_FLAG_SPACE_MAP))
match = B_FALSE;
break;
default:
if (strcmp(zdb_ot_name(obj_type), "zap") == 0) {
if (!(flags & ZOR_FLAG_ZAP))
match = B_FALSE;
break;
}
/*
* If all bits except some of the supported flags are
* set, the user combined the all-types flag (A) with
* a negated flag to exclude some types (e.g. A-f to
* show all object types except plain files).
*/
if ((flags | ZOR_SUPPORTED_FLAGS) != ZOR_FLAG_ALL_TYPES)
match = B_FALSE;
break;
}
return (match);
}
static void
dump_object(objset_t *os, uint64_t object, int verbosity,
boolean_t *print_header, uint64_t *dnode_slots_used, uint64_t flags)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ);
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = 0;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
/*
* Default to showing all object types if no flags were specified.
*/
if (flags != 0 && flags != ZOR_FLAG_ALL_TYPES &&
!match_object_type(doi.doi_type, flags))
goto out;
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
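/*
 * %full is the fraction of the object's logical size backed by non-hole
 * blocks; for the meta-dnode (object 0) the fill count is in dnodes
 * rather than blocks, hence the DNODES_PER_BLOCK scaling.
 */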
(void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress == ZIO_COMPRESS_INHERIT &&
ZIO_COMPRESS_HASLEVEL(os->os_compress) && verbosity >= 6) {
const char *compname = NULL;
if (zfs_prop_index_to_string(ZFS_PROP_COMPRESSION,
ZIO_COMPRESS_RAW(os->os_compress, os->os_complevel),
&compname) == 0) {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux), " (Z=inherit=%s)",
compname);
} else {
(void) snprintf(aux + strlen(aux),
sizeof (aux) - strlen(aux),
" (Z=inherit=%s-unknown)",
ZDB_COMPRESS_NAME(os->os_compress));
}
} else if (doi.doi_compress == ZIO_COMPRESS_INHERIT && verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=inherit=%s)", ZDB_COMPRESS_NAME(os->os_compress));
} else if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type)) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = B_TRUE;
}
if (verbosity >= 5)
dump_indirect(dn);
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
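/*
 * Walk allocated/hole boundaries with dnode_next_offset(): each
 * iteration finds the start of the next allocated run, then the hole
 * that ends it.
 */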
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ);
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
out:
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
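/*
* Note every MOS object referenced by a dsl_dir so that, once the pool
* has been fully traversed, any MOS object that was never referenced
* can be reported as leaked.
*/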
static void
count_dir_mos_objects(dsl_dir_t *dd)
{
mos_obj_refd(dd->dd_object);
mos_obj_refd(dsl_dir_phys(dd)->dd_child_dir_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_deleg_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_props_zapobj);
mos_obj_refd(dsl_dir_phys(dd)->dd_clones);
/*
* The dd_crypto_obj can be referenced by multiple dsl_dir's.
* Ignore the references after the first one.
*/
mos_obj_refd_multiple(dd->dd_crypto_obj);
}
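/*
* Note every MOS object referenced by a dataset, and for non-snapshots
* the objects of its dsl_dir as well.
*/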
static void
count_ds_mos_objects(dsl_dataset_t *ds)
{
mos_obj_refd(ds->ds_object);
mos_obj_refd(dsl_dataset_phys(ds)->ds_next_clones_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_props_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_userrefs_obj);
mos_obj_refd(dsl_dataset_phys(ds)->ds_snapnames_zapobj);
mos_obj_refd(ds->ds_bookmarks_obj);
if (!dsl_dataset_is_snapshot(ds)) {
count_dir_mos_objects(ds->ds_dir);
}
}
static const char *objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
/*
* Parse a string denoting a range of object IDs of the form
* <start>[:<end>[:flags]], and store the results in zor.
* Return 0 on success. On error, return 1 and update the msg
* pointer to point to a descriptive error message.
*/
static int
parse_object_range(char *range, zopt_object_range_t *zor, char **msg)
{
uint64_t flags = 0;
char *p, *s, *dup, *flagstr, *tmp = NULL;
size_t len;
int i;
int rc = 0;
if (strchr(range, ':') == NULL) {
zor->zor_obj_start = strtoull(range, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in object ID";
rc = 1;
}
+ zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
zor->zor_obj_end = zor->zor_obj_start;
return (rc);
}
if (strchr(range, ':') == range) {
*msg = "Invalid leading colon";
rc = 1;
return (rc);
}
len = strlen(range);
if (range[len - 1] == ':') {
*msg = "Invalid trailing colon";
rc = 1;
return (rc);
}
dup = strdup(range);
s = strtok_r(dup, ":", &tmp);
zor->zor_obj_start = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in start object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
zor->zor_obj_end = strtoull(s, &p, 0);
if (*p != '\0') {
*msg = "Invalid characters in end object ID";
rc = 1;
goto out;
}
if (zor->zor_obj_start > zor->zor_obj_end) {
*msg = "Start object ID may not exceed end object ID";
rc = 1;
goto out;
}
s = strtok_r(NULL, ":", &tmp);
if (s == NULL) {
zor->zor_flags = ZOR_FLAG_ALL_TYPES;
goto out;
} else if (strtok_r(NULL, ":", &tmp) != NULL) {
*msg = "Invalid colon-delimited field after flags";
rc = 1;
goto out;
}
flagstr = s;
for (i = 0; flagstr[i]; i++) {
int bit;
boolean_t negation = (flagstr[i] == '-');
if (negation) {
i++;
if (flagstr[i] == '\0') {
*msg = "Invalid trailing negation operator";
rc = 1;
goto out;
}
}
bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
*msg = "Invalid flag";
rc = 1;
goto out;
}
if (negation)
flags &= ~bit;
else
flags |= bit;
}
zor->zor_flags = flags;
+ zor->zor_obj_start = ZDB_MAP_OBJECT_ID(zor->zor_obj_start);
+ zor->zor_obj_end = ZDB_MAP_OBJECT_ID(zor->zor_obj_end);
+
out:
free(dup);
return (rc);
}
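/*
* Print a one-line summary of the objset (name, type, ID, creation txg,
* referenced size, object count), then dump either the requested object
* ranges or, at sufficient verbosity, the ZIL, deadlists/livelists,
* bookmarks, every object, and dnode slot usage statistics.
*/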
static void
dump_objset(objset_t *os)
{
dmu_objset_stats_t dds = { 0 };
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
boolean_t print_header;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
uint64_t obj_start;
uint64_t obj_end;
uint64_t flags;
/* make sure nicenum has enough space */
CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
print_header = B_TRUE;
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf,
(dds.dds_inconsistent) ? " (inconsistent)" : "");
for (i = 0; i < zopt_object_args; i++) {
obj_start = zopt_object_ranges[i].zor_obj_start;
obj_end = zopt_object_ranges[i].zor_obj_end;
flags = zopt_object_ranges[i].zor_flags;
object = obj_start;
if (object == 0 || obj_start == obj_end)
dump_object(os, object, verbosity, &print_header, NULL,
flags);
else
object--;
while ((dmu_object_next(os, &object, B_FALSE, 0) == 0) &&
object <= obj_end) {
dump_object(os, object, verbosity, &print_header, NULL,
flags);
}
}
if (zopt_object_args > 0) {
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
if (dmu_objset_ds(os) != NULL) {
dsl_dataset_t *ds = dmu_objset_ds(os);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
if (dsl_deadlist_is_open(&ds->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
dump_blkptr_list(&ds->ds_dir->dd_livelist, "Livelist");
if (verify_dd_livelist(os) != 0)
fatal("livelist is incorrect");
}
if (dsl_dataset_remap_deadlist_exists(ds)) {
(void) printf("ds_remap_deadlist:\n");
dump_blkptr_list(&ds->ds_remap_deadlist, "Deadlist");
}
count_ds_mos_objects(ds);
}
if (dmu_objset_ds(os) != NULL)
dump_bookmarks(os, verbosity);
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL, 0);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL, 0);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL, 0);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL, 0);
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots,
0);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
if (leaked_objects != 0) {
(void) printf("%d potentially leaked objects detected\n",
leaked_objects);
leaked_objects = 0;
}
}
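/*
* Print the fields of an uberblock, including MMP state and (at high
* verbosity) the root block pointer, bracketed by the given header and
* footer strings.
*/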
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (MMP_VALID(ub)) {
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (MMP_SEQ_VALID(ub))
(void) printf("\tmmp_seq = %u\n",
(unsigned int) MMP_SEQ(ub));
if (MMP_FAIL_INT_VALID(ub))
(void) printf("\tmmp_fail = %u\n",
(unsigned int) MMP_FAIL_INT(ub));
if (MMP_INTERVAL_VALID(ub))
(void) printf("\tmmp_write = %u\n",
(unsigned int) MMP_INTERVAL(ub));
/* After MMP_* to make summarize_uberblock_mmp cleaner */
(void) printf("\tmmp_valid = %x\n",
(unsigned int) ub->ub_mmp_config & 0xFF);
}
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("\tcheckpoint_txg = %llu\n",
(u_longlong_t)ub->ub_checkpoint_txg);
(void) printf("%s", footer ? footer : "");
}
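/*
* Dump the packed nvlist stored in the pool's MOS config object. The
* bonus buffer of that object holds the size of the packed nvlist.
*/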
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
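/*
* Read a cache file (e.g. zpool.cache), unpack the nvlist it contains,
* and dump it.
*/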
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
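/*
* Recursively walk an nvlist, tallying string, uint64 and boolean pairs
* by type and recording the encoded size of each leaf vdev found under
* a "children" array. Used by dump_nvlist_stats() below.
*/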
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* requires nvlist with non-unique names for stat collection */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
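/*
* Configs and uberblocks read from the labels are tracked by checksum
* in AVL trees, so that identical copies present in more than one label
* are printed only once, together with the set of labels they appear in.
*/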
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference;
for (int i = 0; i < arraysize; i++) {
difference = TREE_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(char *prefix, cksum_record_t *rec)
{
printf("%s", prefix);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
printf("\n");
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct zdb_label {
vdev_label_t label;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
} zdb_label_t;
static void
print_label_header(zdb_label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d\n", l);
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
print_l2arc_header(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device header\n");
(void) printf("------------------------------------\n");
}
static void
print_l2arc_log_blocks(void)
{
(void) printf("------------------------------------\n");
(void) printf("L2ARC device log blocks\n");
(void) printf("------------------------------------\n");
}
static void
dump_l2arc_log_entries(uint64_t log_entries,
l2arc_log_ent_phys_t *le, uint64_t i)
{
for (int j = 0; j < log_entries; j++) {
dva_t dva = le[j].le_dva;
(void) printf("lb[%4llu]\tle[%4d]\tDVA asize: %llu, "
"vdev: %llu, offset: %llu\n",
(u_longlong_t)i, j + 1,
(u_longlong_t)DVA_GET_ASIZE(&dva),
(u_longlong_t)DVA_GET_VDEV(&dva),
(u_longlong_t)DVA_GET_OFFSET(&dva));
(void) printf("|\t\t\t\tbirth: %llu\n",
(u_longlong_t)le[j].le_birth);
(void) printf("|\t\t\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tpsize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&le[j])->le_prop));
(void) printf("|\t\t\t\tcompr: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&le[j])->le_prop));
(void) printf("|\t\t\t\tcomplevel: %llu\n",
(u_longlong_t)(&le[j])->le_complevel);
(void) printf("|\t\t\t\ttype: %llu\n",
(u_longlong_t)L2BLK_GET_TYPE((&le[j])->le_prop));
(void) printf("|\t\t\t\tprotected: %llu\n",
(u_longlong_t)L2BLK_GET_PROTECTED((&le[j])->le_prop));
(void) printf("|\t\t\t\tprefetch: %llu\n",
(u_longlong_t)L2BLK_GET_PREFETCH((&le[j])->le_prop));
(void) printf("|\t\t\t\taddress: %llu\n",
(u_longlong_t)le[j].le_daddr);
(void) printf("|\t\t\t\tARC state: %llu\n",
(u_longlong_t)L2BLK_GET_STATE((&le[j])->le_prop));
(void) printf("|\n");
}
(void) printf("\n");
}
static void
dump_l2arc_log_blkptr(l2arc_log_blkptr_t lbps)
{
(void) printf("|\t\tdaddr: %llu\n", (u_longlong_t)lbps.lbp_daddr);
(void) printf("|\t\tpayload_asize: %llu\n",
(u_longlong_t)lbps.lbp_payload_asize);
(void) printf("|\t\tpayload_start: %llu\n",
(u_longlong_t)lbps.lbp_payload_start);
(void) printf("|\t\tlsize: %llu\n",
(u_longlong_t)L2BLK_GET_LSIZE((&lbps)->lbp_prop));
(void) printf("|\t\tasize: %llu\n",
(u_longlong_t)L2BLK_GET_PSIZE((&lbps)->lbp_prop));
(void) printf("|\t\tcompralgo: %llu\n",
(u_longlong_t)L2BLK_GET_COMPRESS((&lbps)->lbp_prop));
(void) printf("|\t\tcksumalgo: %llu\n",
(u_longlong_t)L2BLK_GET_CHECKSUM((&lbps)->lbp_prop));
(void) printf("|\n\n");
}
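/*
* Walk the chain of L2ARC log blocks starting at the device header,
* verifying each block's checksum, decompressing it when necessary, and
* accumulating the block count and aligned size in *rebuild. This
* mirrors what l2arc_rebuild() does at import time.
*/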
static void
dump_l2arc_log_blocks(int fd, l2arc_dev_hdr_phys_t l2dhdr,
l2arc_dev_hdr_phys_t *rebuild)
{
l2arc_log_blk_phys_t this_lb;
uint64_t asize;
l2arc_log_blkptr_t lbps[2];
abd_t *abd;
zio_cksum_t cksum;
int failed = 0;
l2arc_dev_t dev;
if (!dump_opt['q'])
print_l2arc_log_blocks();
bcopy((&l2dhdr)->dh_start_lbps, lbps, sizeof (lbps));
dev.l2ad_evict = l2dhdr.dh_evict;
dev.l2ad_start = l2dhdr.dh_start;
dev.l2ad_end = l2dhdr.dh_end;
if (l2dhdr.dh_start_lbps[0].lbp_daddr == 0) {
/* no log blocks to read */
if (!dump_opt['q']) {
(void) printf("No log blocks to read\n");
(void) printf("\n");
}
return;
} else {
dev.l2ad_hand = lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
}
dev.l2ad_first = !!(l2dhdr.dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
for (;;) {
if (!l2arc_log_blkptr_valid(&dev, &lbps[0]))
break;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
if (pread64(fd, &this_lb, asize, lbps[0].lbp_daddr) != asize) {
if (!dump_opt['q']) {
(void) printf("Error while reading next log "
"block\n\n");
}
break;
}
fletcher_4_native_varsize(&this_lb, asize, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, lbps[0].lbp_cksum)) {
failed++;
if (!dump_opt['q']) {
(void) printf("Invalid cksum\n");
dump_l2arc_log_blkptr(lbps[0]);
}
break;
}
switch (L2BLK_GET_COMPRESS((&lbps[0])->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
default:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, &this_lb, 0, asize);
zio_decompress_data(L2BLK_GET_COMPRESS(
(&lbps[0])->lbp_prop), abd, &this_lb,
asize, sizeof (this_lb), NULL);
abd_free(abd);
break;
}
if (this_lb.lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(&this_lb, sizeof (this_lb));
if (this_lb.lb_magic != L2ARC_LOG_BLK_MAGIC) {
if (!dump_opt['q'])
(void) printf("Invalid log block magic\n\n");
break;
}
rebuild->dh_lb_count++;
rebuild->dh_lb_asize += asize;
if (dump_opt['l'] > 1 && !dump_opt['q']) {
(void) printf("lb[%4llu]\tmagic: %llu\n",
(u_longlong_t)rebuild->dh_lb_count,
(u_longlong_t)this_lb.lb_magic);
dump_l2arc_log_blkptr(lbps[0]);
}
if (dump_opt['l'] > 2 && !dump_opt['q'])
dump_l2arc_log_entries(l2dhdr.dh_log_entries,
this_lb.lb_entries,
rebuild->dh_lb_count);
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev.l2ad_evict) &&
!dev.l2ad_first)
break;
lbps[0] = lbps[1];
lbps[1] = this_lb.lb_prev_lbp;
}
if (!dump_opt['q']) {
(void) printf("log_blk_count:\t %llu with valid cksum\n",
(u_longlong_t)rebuild->dh_lb_count);
(void) printf("\t\t %d with invalid cksum\n", failed);
(void) printf("log_blk_asize:\t %llu\n\n",
(u_longlong_t)rebuild->dh_lb_asize);
}
}
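/*
* Read and validate the L2ARC device header that follows the vdev
* labels, print its fields, and dump the log blocks it points to.
* Returns nonzero if the header claims more log blocks or aligned size
* than were actually found on the device.
*/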
static int
dump_l2arc_header(int fd)
{
l2arc_dev_hdr_phys_t l2dhdr, rebuild;
int error = B_FALSE;
bzero(&l2dhdr, sizeof (l2dhdr));
bzero(&rebuild, sizeof (rebuild));
if (pread64(fd, &l2dhdr, sizeof (l2dhdr),
VDEV_LABEL_START_SIZE) != sizeof (l2dhdr)) {
error = B_TRUE;
} else {
if (l2dhdr.dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(&l2dhdr, sizeof (l2dhdr));
if (l2dhdr.dh_magic != L2ARC_DEV_HDR_MAGIC)
error = B_TRUE;
}
if (error) {
(void) printf("L2ARC device header not found\n\n");
/* Do not return an error here for backward compatibility */
return (0);
} else if (!dump_opt['q']) {
print_l2arc_header();
(void) printf(" magic: %llu\n",
(u_longlong_t)l2dhdr.dh_magic);
(void) printf(" version: %llu\n",
(u_longlong_t)l2dhdr.dh_version);
(void) printf(" pool_guid: %llu\n",
(u_longlong_t)l2dhdr.dh_spa_guid);
(void) printf(" flags: %llu\n",
(u_longlong_t)l2dhdr.dh_flags);
(void) printf(" start_lbps[0]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[0].lbp_daddr);
(void) printf(" start_lbps[1]: %llu\n",
(u_longlong_t)
l2dhdr.dh_start_lbps[1].lbp_daddr);
(void) printf(" log_blk_ent: %llu\n",
(u_longlong_t)l2dhdr.dh_log_entries);
(void) printf(" start: %llu\n",
(u_longlong_t)l2dhdr.dh_start);
(void) printf(" end: %llu\n",
(u_longlong_t)l2dhdr.dh_end);
(void) printf(" evict: %llu\n",
(u_longlong_t)l2dhdr.dh_evict);
(void) printf(" lb_asize_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_asize);
(void) printf(" lb_count_refcount: %llu\n",
(u_longlong_t)l2dhdr.dh_lb_count);
(void) printf(" trim_action_time: %llu\n",
(u_longlong_t)l2dhdr.dh_trim_action_time);
(void) printf(" trim_state: %llu\n\n",
(u_longlong_t)l2dhdr.dh_trim_state);
}
dump_l2arc_log_blocks(fd, l2dhdr, &rebuild);
/*
* The total aligned size of log blocks and the number of log blocks
* reported in the header of the device may be less than what zdb
* reports by dump_l2arc_log_blocks() which emulates l2arc_rebuild().
* This happens because dump_l2arc_log_blocks() lacks the memory
* pressure valve that l2arc_rebuild() has. Thus, if we are on a system
* with low memory, l2arc_rebuild will exit prematurely and dh_lb_asize
* and dh_lb_count will be lower to begin with than what exists on the
* device. This is normal and zdb should not exit with an error. The
* opposite case should never happen though, the values reported in the
* header should never be higher than what dump_l2arc_log_blocks() and
* l2arc_rebuild() report. If this happens there is a leak in the
* accounting of log blocks.
*/
if (l2dhdr.dh_lb_asize > rebuild.dh_lb_asize ||
l2dhdr.dh_lb_count > rebuild.dh_lb_count)
return (1);
return (0);
}
static void
dump_config_from_label(zdb_label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
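/*
* Print the uberblocks recorded for a label. Unless verbosity is
* raised, uberblocks already shown for an earlier label and the slots
* reserved for MMP writes are skipped.
*/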
static void
dump_label_uberblocks(zdb_label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing
* current one's obj and remaining path until we find the obj
* for the last one.
*/
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name, uint64_t *retobj)
{
int err;
boolean_t header = B_TRUE;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1, retobj));
- /* FALLTHROUGH */
+ fallthrough;
case DMU_OT_PLAIN_FILE_CONTENTS:
if (retobj != NULL) {
*retobj = child_obj;
} else {
dump_object(os, child_obj, dump_opt['v'], &header,
NULL, 0);
}
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path, uint64_t *retobj)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
close_objset(os, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path, retobj);
close_objset(os, FTAG);
return (err);
}
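/*
* Copy the contents of the file object srcobj to destfile, reading via
* dmu_read() in chunks of at most 1 MiB.
*/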
static int
zdb_copy_object(objset_t *os, uint64_t srcobj, char *destfile)
{
int err = 0;
uint64_t size, readsize, oursize, offset;
ssize_t writesize;
sa_handle_t *hdl;
(void) printf("Copying object %" PRIu64 " to file %s\n", srcobj,
destfile);
VERIFY3P(os, ==, sa_os);
if ((err = sa_handle_get(os, srcobj, NULL, SA_HDL_PRIVATE, &hdl))) {
(void) printf("Failed to get handle for SA znode\n");
return (err);
}
if ((err = sa_lookup(hdl, sa_attr_table[ZPL_SIZE], &size, 8))) {
(void) sa_handle_destroy(hdl);
return (err);
}
(void) sa_handle_destroy(hdl);
(void) printf("Object %" PRIu64 " is %" PRIu64 " bytes\n", srcobj,
size);
if (size == 0) {
return (EINVAL);
}
int fd = open(destfile, O_WRONLY | O_CREAT | O_TRUNC, 0644);
if (fd == -1) {
err = errno;
(void) fprintf(stderr, "failed to open %s: %s\n", destfile,
strerror(err));
return (err);
}
/*
* We cap the size at 1 mebibyte here to prevent
* allocation failures and nigh-infinite printing if the
* object is extremely large.
*/
oursize = MIN(size, 1 << 20);
offset = 0;
char *buf = kmem_alloc(oursize, KM_NOSLEEP);
if (buf == NULL) {
(void) close(fd);
return (ENOMEM);
}
while (offset < size) {
readsize = MIN(size - offset, 1 << 20);
err = dmu_read(os, srcobj, offset, readsize, buf, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(buf, oursize);
return (err);
}
if (dump_opt['v'] > 3) {
(void) printf("Read offset=%" PRIu64 " size=%" PRIu64
" error=%d\n", offset, readsize, err);
}
writesize = write(fd, buf, readsize);
if (writesize < 0) {
err = errno;
break;
} else if (writesize != readsize) {
/* Incomplete write */
(void) fprintf(stderr, "Short write, only wrote %llu of"
" %" PRIu64 " bytes, exiting...\n",
(u_longlong_t)writesize, readsize);
break;
}
offset += readsize;
}
(void) close(fd);
if (buf != NULL)
kmem_free(buf, oursize);
return (err);
}
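/*
* Read all of the labels from the named device, deduplicate their
* configs and uberblocks by checksum, and dump them. If the device is
* an L2ARC cache device its header is dumped as well. Returns 0 on
* success, 1 on error, and 2 if no valid config was found.
*/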
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
zdb_label_t labels[VDEV_LABELS];
uint64_t psize, ashift, l2cache;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
boolean_t read_l2arc_header = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
bzero(labels, sizeof (labels));
/*
* Check if we were given an absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && zfs_dev_flush(fd) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Unpack the configuration and insert in config tree.
* 3. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
if (pread64(fd, &label->label, sizeof (label->label),
vdev_label_offset(psize, l, 0)) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
/* If the device is a cache device, note that we must read its L2ARC header. */
if (!read_l2arc_header) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &l2cache) == 0 &&
l2cache == POOL_STATE_L2CACHE) {
read_l2arc_header = B_TRUE;
}
}
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
zdb_label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
/*
* Dump the L2ARC header, if existent.
*/
if (read_l2arc_header)
error |= dump_l2arc_header(fd);
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
static uint64_t global_feature_count[SPA_FEATURES];
static uint64_t remap_deadlist_count = 0;
/*ARGSUSED*/
static int
dump_one_objset(const char *dsname, void *arg)
{
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dsl_dataset_feature_is_active(dmu_objset_ds(os), f))
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
remap_deadlist_count++;
}
for (dsl_bookmark_node_t *dbn =
avl_first(&dmu_objset_ds(os)->ds_bookmarks); dbn != NULL;
dbn = AVL_NEXT(&dmu_objset_ds(os)->ds_bookmarks, dbn)) {
mos_obj_refd(dbn->dbn_phys.zbm_redaction_obj);
if (dbn->dbn_phys.zbm_redaction_obj != 0)
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS]++;
if (dbn->dbn_phys.zbm_flags & ZBM_FLAG_HAS_FBN)
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN]++;
}
if (dsl_deadlist_is_open(&dmu_objset_ds(os)->ds_dir->dd_livelist) &&
!dmu_objset_is_snapshot(os)) {
global_feature_count[SPA_FEATURE_LIVELIST]++;
}
dump_objset(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_ditto_same_ms;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
#define SPA_MAX_FOR_16M (SPA_MAXBLOCKSHIFT+1)
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
uint64_t zcb_removing_size;
uint64_t zcb_checkpoint_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_psize_count[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_count[SPA_MAX_FOR_16M];
uint64_t zcb_asize_count[SPA_MAX_FOR_16M];
uint64_t zcb_psize_len[SPA_MAX_FOR_16M];
uint64_t zcb_lsize_len[SPA_MAX_FOR_16M];
uint64_t zcb_asize_len[SPA_MAX_FOR_16M];
uint64_t zcb_psize_total;
uint64_t zcb_lsize_total;
uint64_t zcb_asize_total;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
uint32_t **zcb_vd_obsolete_counts;
} zdb_cb_t;
/* test if two DVA offsets from same vdev are within the same metaslab */
static boolean_t
same_metaslab(spa_t *spa, uint64_t vdev, uint64_t off1, uint64_t off2)
{
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t ms_shift = vd->vdev_ms_shift;
return ((off1 >> ms_shift) == (off2 >> ms_shift));
}
/*
* Used to simplify reporting of the histogram data.
*/
typedef struct one_histo {
char *name;
uint64_t *count;
uint64_t *len;
uint64_t cumulative;
} one_histo_t;
/*
* The number of separate histograms processed for psize, lsize and asize.
*/
#define NUM_HISTO 3
/*
* This routine prints a fixed-column-width table of three histograms,
* showing, for each power-of-two block size from 512 up to
* 2^SPA_MAX_FOR_16M, the count, length, and cumulative length of the
* psize, lsize, and asize blocks.
*
* All three types of blocks are listed on a single line
*
* By default the table is printed in nicenumber format (e.g. 123K) but
* if the '-P' parameter is specified then the full raw number (parseable)
* is printed out.
*/
static void
dump_size_histograms(zdb_cb_t *zcb)
{
/*
* A temporary buffer that allows us to convert a number into
* a string using zdb_nicenumber to allow either raw or human
* readable numbers to be output.
*/
char numbuf[32];
/*
* Define titles which are used in the headers of the tables
* printed by this routine.
*/
const char blocksize_title1[] = "block";
const char blocksize_title2[] = "size";
const char count_title[] = "Count";
const char length_title[] = "Size";
const char cumulative_title[] = "Cum.";
/*
* Setup the histogram arrays (psize, lsize, and asize).
*/
one_histo_t parm_histo[NUM_HISTO];
parm_histo[0].name = "psize";
parm_histo[0].count = zcb->zcb_psize_count;
parm_histo[0].len = zcb->zcb_psize_len;
parm_histo[0].cumulative = 0;
parm_histo[1].name = "lsize";
parm_histo[1].count = zcb->zcb_lsize_count;
parm_histo[1].len = zcb->zcb_lsize_len;
parm_histo[1].cumulative = 0;
parm_histo[2].name = "asize";
parm_histo[2].count = zcb->zcb_asize_count;
parm_histo[2].len = zcb->zcb_asize_len;
parm_histo[2].cumulative = 0;
(void) printf("\nBlock Size Histogram\n");
/*
* Print the first line titles
*/
if (dump_opt['P'])
(void) printf("\n%s\t", blocksize_title1);
else
(void) printf("\n%7s ", blocksize_title1);
for (int j = 0; j < NUM_HISTO; j++) {
if (dump_opt['P']) {
if (j < NUM_HISTO - 1) {
(void) printf("%s\t\t\t", parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf(" %s", parm_histo[j].name);
}
} else {
if (j < NUM_HISTO - 1) {
/* Left aligned strings in the output */
(void) printf("%-7s ",
parm_histo[j].name);
} else {
/* Don't print trailing spaces */
(void) printf("%s", parm_histo[j].name);
}
}
}
(void) printf("\n");
/*
* Print the second line titles
*/
if (dump_opt['P']) {
(void) printf("%s\t", blocksize_title2);
} else {
(void) printf("%7s ", blocksize_title2);
}
for (int i = 0; i < NUM_HISTO; i++) {
if (dump_opt['P']) {
(void) printf("%s\t%s\t%s\t",
count_title, length_title, cumulative_title);
} else {
(void) printf("%7s%7s%7s",
count_title, length_title, cumulative_title);
}
}
(void) printf("\n");
/*
* Print the rows
*/
for (int i = SPA_MINBLOCKSHIFT; i < SPA_MAX_FOR_16M; i++) {
/*
* Print the first column showing the blocksize
*/
zdb_nicenum((1ULL << i), numbuf, sizeof (numbuf));
if (dump_opt['P']) {
printf("%s", numbuf);
} else {
printf("%7s:", numbuf);
}
/*
* Print the remaining set of 3 columns per size:
* for psize, lsize and asize
*/
for (int j = 0; j < NUM_HISTO; j++) {
parm_histo[j].cumulative += parm_histo[j].len[i];
zdb_nicenum(parm_histo[j].count[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].len[i],
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
zdb_nicenum(parm_histo[j].cumulative,
numbuf, sizeof (numbuf));
if (dump_opt['P'])
(void) printf("\t%s", numbuf);
else
(void) printf("%7s", numbuf);
}
(void) printf("\n");
}
}
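/*
* Account for one block in the traversal statistics: per-level and
* per-type size totals, the psize histogram, gang and ditto counts, the
* size histograms above, and DDT reference counts. Unless -L was given,
* the block is also claimed so that leaked space can be detected
* afterwards.
*/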
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
uint64_t refcnt = 0;
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
spa_config_enter(zcb->zcb_spa, SCL_CONFIG, FTAG, RW_READER);
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) {
zb->zb_ditto_samevdev++;
if (same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
}
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0) {
zb->zb_ditto_samevdev++;
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[1])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[0]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
else if (DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]) &&
same_metaslab(zcb->zcb_spa,
DVA_GET_VDEV(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[1]),
DVA_GET_OFFSET(&bp->blk_dva[2])))
zb->zb_ditto_same_ms++;
}
break;
}
}
spa_config_exit(zcb->zcb_spa, SCL_CONFIG, FTAG);
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
/*
* The binning histogram bins by powers of two up to
* SPA_MAXBLOCKSIZE rather than creating bins for
* every possible blocksize found in the pool.
*/
int bin = highbit64(BP_GET_PSIZE(bp)) - 1;
zcb->zcb_psize_count[bin]++;
zcb->zcb_psize_len[bin] += BP_GET_PSIZE(bp);
zcb->zcb_psize_total += BP_GET_PSIZE(bp);
bin = highbit64(BP_GET_LSIZE(bp)) - 1;
zcb->zcb_lsize_count[bin]++;
zcb->zcb_lsize_len[bin] += BP_GET_LSIZE(bp);
zcb->zcb_lsize_total += BP_GET_LSIZE(bp);
bin = highbit64(BP_GET_ASIZE(bp)) - 1;
zcb->zcb_asize_count[bin]++;
zcb->zcb_asize_len[bin] += BP_GET_ASIZE(bp);
zcb->zcb_asize_total += BP_GET_ASIZE(bp);
if (dump_opt['L'])
return;
if (BP_GET_DEDUP(bp)) {
ddt_t *ddt;
ddt_entry_t *dde;
ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
refcnt = 0;
} else {
ddt_phys_t *ddp = ddt_phys_select(dde, bp);
ddt_phys_decref(ddp);
refcnt = ddp->ddp_refcnt;
if (ddt_phys_total_refcnt(dde) == 0)
ddt_remove(ddt, dde);
}
ddt_exit(ddt);
}
VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
refcnt ? 0 : spa_min_claim_txg(zcb->zcb_spa),
bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
}
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
abd_free(zio->io_abd);
}
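/*
* Traversal callback invoked for every block pointer in the pool.
* Counts the block and, when checksum verification was requested
* (-c for metadata, -cc for all blocks), issues an asynchronous read
* throttled by max_inflight_bytes. Also prints periodic progress.
*/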
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (zb->zb_level == ZB_DNODE_LEVEL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp) || BP_IS_REDACTED(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes > max_inflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
int kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
int sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
/* make sure nicenum has enough space */
CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ);
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4dMB/s) "
"estimated time remaining: %uhr %02umin %02usec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
/* ARGSUSED */
static int
load_unflushed_svr_segs_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
spa_vdev_removal_t *svr = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
/* skip vdevs we don't care about */
if (sme->sme_vdev != svr->svr_vdev_id)
return (0);
vdev_t *vd = vdev_lookup_top(spa, sme->sme_vdev);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
range_tree_add(svr->svr_allocd_segs, offset, size);
else
range_tree_remove(svr->svr_allocd_segs, offset, size);
return (0);
}
/* ARGSUSED */
static void
claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
uint64_t size, void *arg)
{
/*
* This callback was called through a remap from
* a device being removed. Therefore, the vdev that
* this callback is applied to is a concrete
* vdev.
*/
ASSERT(vdev_is_concrete(vd));
VERIFY0(metaslab_claim_impl(vd, offset, size,
spa_min_claim_txg(vd->vdev_spa)));
}
static void
claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
{
vdev_t *vd = arg;
vdev_indirect_ops.vdev_op_remap(vd, offset, size,
claim_segment_impl_cb, NULL);
}
/*
* After accounting for all allocated blocks that are directly referenced,
* we might have missed a reference to a block from a partially complete
* (and thus unused) indirect mapping object. We perform a secondary pass
* through the metaslabs we have already mapped and claim the destination
* blocks.
*/
static void
zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return;
if (spa->spa_vdev_removal == NULL)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_vdev_removal_t *svr = spa->spa_vdev_removal;
vdev_t *vd = vdev_lookup_top(spa, svr->svr_vdev_id);
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
ASSERT0(range_tree_space(svr->svr_allocd_segs));
range_tree_t *allocs = range_tree_create(NULL, RANGE_SEG64, NULL, 0, 0);
for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
metaslab_t *msp = vd->vdev_ms[msi];
ASSERT0(range_tree_space(allocs));
if (msp->ms_sm != NULL)
VERIFY0(space_map_load(msp->ms_sm, allocs, SM_ALLOC));
range_tree_vacate(allocs, range_tree_add, svr->svr_allocd_segs);
}
range_tree_destroy(allocs);
iterate_through_spacemap_logs(spa, load_unflushed_svr_segs_cb, svr);
/*
* Clear everything past what has been synced,
* because we have not allocated mappings for
* it yet.
*/
range_tree_clear(svr->svr_allocd_segs,
vdev_indirect_mapping_max_offset(vim),
vd->vdev_asize - vdev_indirect_mapping_max_offset(vim));
zcb->zcb_removing_size += range_tree_space(svr->svr_allocd_segs);
range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/* ARGSUSED */
static int
increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
zdb_cb_t *zcb = arg;
spa_t *spa = zcb->zcb_spa;
vdev_t *vd;
const dva_t *dva = &bp->blk_dva[0];
ASSERT(!bp_freed);
ASSERT(!dump_opt['L']);
ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
ASSERT3P(vd, !=, NULL);
spa_config_exit(spa, SCL_VDEV, FTAG);
ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
vdev_indirect_mapping_increment_obsolete_count(
vd->vdev_indirect_mapping,
DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
return (0);
}
static uint32_t *
zdb_load_obsolete_counts(vdev_t *vd)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
spa_t *spa = vd->vdev_spa;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
uint64_t obsolete_sm_object;
uint32_t *counts;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
EQUIV(obsolete_sm_object != 0, vd->vdev_obsolete_sm != NULL);
counts = vdev_indirect_mapping_load_obsolete_counts(vim);
if (vd->vdev_obsolete_sm != NULL) {
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
vd->vdev_obsolete_sm);
}
if (scip->scip_vdev == vd->vdev_id &&
scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
prev_obsolete_sm);
space_map_close(prev_obsolete_sm);
}
return (counts);
}
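/*
* Walk the on-disk DDT and account for dedup savings from entries that
* are referenced more than once. Each such entry is also loaded into
* the in-core DDT so that zdb_count_block() can track the remaining
* references to deduplicated blocks during traversal.
*/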
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb;
ddt_entry_t dde;
int error;
int p;
ASSERT(!dump_opt['L']);
bzero(&ddb, sizeof (ddb));
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
if (ddb.ddb_class == DDT_CLASS_UNIQUE)
return;
ASSERT(ddt_phys_total_refcnt(&dde) > 1);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddb.ddb_checksum,
&dde.dde_key, ddp, &blk);
if (p == DDT_PHYS_DITTO) {
zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
} else {
zcb->zcb_dedup_asize +=
BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
zcb->zcb_dedup_blocks++;
}
}
ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
ddt_enter(ddt);
VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
ddt_exit(ddt);
}
ASSERT(error == ENOENT);
}
typedef struct checkpoint_sm_exclude_entry_arg {
vdev_t *cseea_vd;
uint64_t cseea_checkpoint_size;
} checkpoint_sm_exclude_entry_arg_t;
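/*
* Space map callback that removes each checkpointed segment from the
* metaslab's ms_allocatable tree, so that space preserved by the pool
* checkpoint is not reported as leaked, and accumulates the total
* checkpoint size.
*/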
static int
checkpoint_sm_exclude_entry_cb(space_map_entry_t *sme, void *arg)
{
checkpoint_sm_exclude_entry_arg_t *cseea = arg;
vdev_t *vd = cseea->cseea_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
/*
* Since the vdev_checkpoint_sm exists in the vdev level
* and the ms_sm space maps exist in the metaslab level,
* an entry in the checkpoint space map could theoretically
* cross the boundaries of the metaslab to which it belongs.
*
* In reality, because of the way that we populate and
* manipulate the checkpoint's space maps currently,
* there shouldn't be any entries that cross metaslabs.
* Hence the assertion below.
*
* That said, there is no fundamental requirement that
* the checkpoint's space map entries should not cross
* metaslab boundaries. So if needed we could add code
* that handles metaslab-crossing segments in the future.
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* By removing the entry from the allocated segments we
* also verify that the entry is there to begin with.
*/
mutex_enter(&ms->ms_lock);
range_tree_remove(ms->ms_allocatable, sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
cseea->cseea_checkpoint_size += sme->sme_run;
return (0);
}
static void
zdb_leak_init_vdev_exclude_checkpoint(vdev_t *vd, zdb_cb_t *zcb)
{
spa_t *spa = vd->vdev_spa;
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
/*
* If there is no vdev_top_zap, we are in a pool whose
* version predates the pool checkpoint feature.
*/
if (vd->vdev_top_zap == 0)
return;
/*
* If there is no reference of the vdev_checkpoint_sm in
* the vdev_top_zap, then one of the following scenarios
* is true:
*
* 1] There is no checkpoint
* 2] There is a checkpoint, but no checkpointed blocks
* have been freed yet
* 3] The current vdev is indirect
*
* In these cases we return immediately.
*/
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
return;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM, sizeof (uint64_t), 1,
&checkpoint_sm_obj));
checkpoint_sm_exclude_entry_arg_t cseea;
cseea.cseea_vd = vd;
cseea.cseea_checkpoint_size = 0;
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
checkpoint_sm_exclude_entry_cb, &cseea));
space_map_close(checkpoint_sm);
zcb->zcb_checkpoint_size += cseea.cseea_checkpoint_size;
}
static void
zdb_leak_init_exclude_checkpoint(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
ASSERT3U(c, ==, rvd->vdev_child[c]->vdev_id);
zdb_leak_init_vdev_exclude_checkpoint(rvd->vdev_child[c], zcb);
}
}
static int
count_unflushed_space_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
int64_t *ualloc_space = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (sme->sme_type == SM_ALLOC)
*ualloc_space += sme->sme_run;
else
*ualloc_space -= sme->sme_run;
return (0);
}
static int64_t
get_unflushed_alloc_space(spa_t *spa)
{
if (dump_opt['L'])
return (0);
int64_t ualloc_space = 0;
iterate_through_spacemap_logs(spa, count_unflushed_space_cb,
&ualloc_space);
return (ualloc_space);
}
static int
load_unflushed_cb(spa_t *spa, space_map_entry_t *sme, uint64_t txg, void *arg)
{
maptype_t *uic_maptype = arg;
uint64_t offset = sme->sme_offset;
uint64_t size = sme->sme_run;
uint64_t vdev_id = sme->sme_vdev;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
/* skip indirect vdevs */
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
ASSERT(*uic_maptype == SM_ALLOC || *uic_maptype == SM_FREE);
if (txg < metaslab_unflushed_txg(ms))
return (0);
if (*uic_maptype == sme->sme_type)
range_tree_add(ms->ms_allocatable, offset, size);
else
range_tree_remove(ms->ms_allocatable, offset, size);
return (0);
}
static void
load_unflushed_to_ms_allocatables(spa_t *spa, maptype_t maptype)
{
iterate_through_spacemap_logs(spa, load_unflushed_cb, &maptype);
}
static void
load_concrete_ms_allocatable_trees(spa_t *spa, maptype_t maptype)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
ASSERT3U(i, ==, vd->vdev_id);
if (vd->vdev_ops == &vdev_indirect_ops)
continue;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
(void) fprintf(stderr,
"\rloading concrete vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)msp->ms_id,
(longlong_t)vd->vdev_ms_count);
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
if (msp->ms_sm != NULL) {
VERIFY0(space_map_load(msp->ms_sm,
msp->ms_allocatable, maptype));
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
}
load_unflushed_to_ms_allocatables(spa, maptype);
}
/*
* vim_idxp is an in-out parameter which (for indirect vdevs) is the
* index in vim_entries that has the first entry in this metaslab.
* On return, it will be set to the first entry after this metaslab.
*/
static void
load_indirect_ms_allocatable_tree(vdev_t *vd, metaslab_t *msp,
uint64_t *vim_idxp)
{
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
mutex_enter(&msp->ms_lock);
range_tree_vacate(msp->ms_allocatable, NULL, NULL);
/*
* We don't want to spend the CPU manipulating the
* size-ordered tree, so clear the range_tree ops.
*/
msp->ms_allocatable->rt_ops = NULL;
for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
(*vim_idxp)++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[*vim_idxp];
uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
ASSERT3U(ent_offset, >=, msp->ms_start);
if (ent_offset >= msp->ms_start + msp->ms_size)
break;
/*
* Mappings do not cross metaslab boundaries,
* because we create them by walking the metaslabs.
*/
ASSERT3U(ent_offset + ent_len, <=,
msp->ms_start + msp->ms_size);
range_tree_add(msp->ms_allocatable, ent_offset, ent_len);
}
if (!msp->ms_loaded)
msp->ms_loaded = B_TRUE;
mutex_exit(&msp->ms_lock);
}
static void
zdb_leak_init_prepare_indirect_vdevs(spa_t *spa, zdb_cb_t *zcb)
{
ASSERT(!dump_opt['L']);
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERT3U(c, ==, vd->vdev_id);
if (vd->vdev_ops != &vdev_indirect_ops)
continue;
/*
* Note: we don't check for mapping leaks on
* removing vdevs because their ms_allocatable's
* are used to look for leaks in allocated space.
*/
zcb->zcb_vd_obsolete_counts[c] = zdb_load_obsolete_counts(vd);
/*
* Normally, indirect vdevs don't have any
* metaslabs. We want to set them up for
* zio_claim().
*/
vdev_metaslab_group_create(vd);
VERIFY0(vdev_metaslab_init(vd, 0));
vdev_indirect_mapping_t *vim __maybe_unused =
vd->vdev_indirect_mapping;
uint64_t vim_idx = 0;
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
(void) fprintf(stderr,
"\rloading indirect vdev %llu, "
"metaslab %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vd->vdev_ms[m]->ms_id,
(longlong_t)vd->vdev_ms_count);
load_indirect_ms_allocatable_tree(vd, vd->vdev_ms[m],
&vim_idx);
}
ASSERT3U(vim_idx, ==, vdev_indirect_mapping_num_entries(vim));
}
}
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
if (dump_opt['L'])
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_allocatable. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
spa->spa_embedded_log_class->mc_ops = &zdb_metaslab_ops;
zcb->zcb_vd_obsolete_counts =
umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
UMEM_NOFAIL);
/*
* For leak detection, we overload the ms_allocatable trees
* to contain allocated segments instead of free segments.
* As a result, we can't use the normal metaslab_load/unload
* interfaces.
*/
zdb_leak_init_prepare_indirect_vdevs(spa, zcb);
load_concrete_ms_allocatable_trees(spa, SM_ALLOC);
/*
* On load_concrete_ms_allocatable_trees() we loaded all the
* allocated entries from the ms_sm to the ms_allocatable for
* each metaslab. If the pool has a checkpoint or is in the
* middle of discarding a checkpoint, some of these blocks
* may have been freed but their ms_sm may not have been
* updated because they are referenced by the checkpoint. In
* order to avoid false-positives during leak-detection, we
* go through the vdev's checkpoint space map and exclude all
* its entries from their relevant ms_allocatable.
*
* We also aggregate the space held by the checkpoint and add
* it to zcb_checkpoint_size.
*
* Note that at this point we are also verifying that all the
* entries on the checkpoint_sm are marked as allocated in
* the ms_sm of their relevant metaslab.
* [see comment in checkpoint_sm_exclude_entry_cb()]
*/
zdb_leak_init_exclude_checkpoint(spa, zcb);
ASSERT3U(zcb->zcb_checkpoint_size, ==, spa_get_checkpoint_space(spa));
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
(void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
increment_indirect_mapping_cb, zcb, NULL);
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
zdb_ddt_leak_init(spa, zcb);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
static boolean_t
zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
boolean_t leaks = B_FALSE;
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
uint64_t total_leaked = 0;
boolean_t are_precise = B_FALSE;
ASSERT(vim != NULL);
for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
vdev_indirect_mapping_entry_phys_t *vimep =
&vim->vim_entries[i];
uint64_t obsolete_bytes = 0;
uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
/*
* This is not very efficient but it's easy to
* verify correctness.
*/
for (uint64_t inner_offset = 0;
inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
inner_offset += 1 << vd->vdev_ashift) {
if (range_tree_contains(msp->ms_allocatable,
offset + inner_offset, 1 << vd->vdev_ashift)) {
obsolete_bytes += 1 << vd->vdev_ashift;
}
}
int64_t bytes_leaked = obsolete_bytes -
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (bytes_leaked != 0 && (are_precise || dump_opt['d'] >= 5)) {
(void) printf("obsolete indirect mapping count "
"mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
(u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
(u_longlong_t)bytes_leaked);
}
total_leaked += ABS(bytes_leaked);
}
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (!are_precise && total_leaked > 0) {
int pct_leaked = total_leaked * 100 /
vdev_indirect_mapping_bytes_mapped(vim);
(void) printf("cannot verify obsolete indirect mapping "
"counts of vdev %llu because precise feature was not "
"enabled when it was removed: %d%% (%llx bytes) of mapping"
"unreferenced\n",
(u_longlong_t)vd->vdev_id, pct_leaked,
(u_longlong_t)total_leaked);
} else if (total_leaked > 0) {
(void) printf("obsolete indirect mapping count mismatch "
"for vdev %llu -- %llx total bytes mismatched\n",
(u_longlong_t)vd->vdev_id,
(u_longlong_t)total_leaked);
leaks |= B_TRUE;
}
vdev_indirect_mapping_free_obsolete_counts(vim,
zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
return (leaks);
}
static boolean_t
zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
{
if (dump_opt['L'])
return (B_FALSE);
boolean_t leaks = B_FALSE;
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
}
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(msp->ms_group, ==, (msp->ms_group->mg_class ==
spa_embedded_log_class(spa)) ?
vd->vdev_log_mg : vd->vdev_mg);
/*
* ms_allocatable has been overloaded
* to contain allocated segments. Now that
* we finished traversing all blocks, any
* block that remains in the ms_allocatable
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
* from the ms_allocatable. For indirect
* vdevs, space remaining in the tree
* represents parts of the mapping that are
* not referenced, which is not a bug.
*/
if (vd->vdev_ops == &vdev_indirect_ops) {
range_tree_vacate(msp->ms_allocatable,
NULL, NULL);
} else {
range_tree_vacate(msp->ms_allocatable,
zdb_leak, vd);
}
if (msp->ms_loaded) {
msp->ms_loaded = B_FALSE;
}
}
}
umem_free(zcb->zcb_vd_obsolete_counts,
rvd->vdev_children * sizeof (uint32_t *));
zcb->zcb_vd_obsolete_counts = NULL;
return (leaks);
}
/* ARGSUSED */
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
/*
* Iterate over livelists which have been destroyed by the user but
* are still present in the MOS, waiting to be freed
*/
static void
iterate_deleted_livelists(spa_t *spa, ll_iter_t func, void *arg)
{
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
ASSERT0(err);
zap_cursor_t zc;
zap_attribute_t attr;
dsl_deadlist_t ll;
/* NULL out os prior to dsl_deadlist_open in case it's garbage */
ll.dl_os = NULL;
for (zap_cursor_init(&zc, mos, zap_obj);
zap_cursor_retrieve(&zc, &attr) == 0;
(void) zap_cursor_advance(&zc)) {
dsl_deadlist_open(&ll, mos, attr.za_first_integer);
func(&ll, arg);
dsl_deadlist_close(&ll);
}
zap_cursor_fini(&zc);
}
static int
bpobj_count_block_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (count_block_cb(arg, bp, tx));
}
static int
livelist_entry_count_blocks_cb(void *args, dsl_deadlist_entry_t *dle)
{
zdb_cb_t *zbc = args;
bplist_t blks;
bplist_create(&blks);
/* determine which blocks have been alloc'd but not freed */
VERIFY0(dsl_process_sub_livelist(&dle->dle_bpobj, &blks, NULL, NULL));
/* count those blocks */
(void) bplist_iterate(&blks, count_block_cb, zbc, NULL);
bplist_destroy(&blks);
return (0);
}
static void
livelist_count_blocks(dsl_deadlist_t *ll, void *arg)
{
dsl_deadlist_iterate(ll, livelist_entry_count_blocks_cb, arg);
}
/*
* Count the blocks in the livelists that have been destroyed by the user
* but haven't yet been freed.
*/
static void
deleted_livelists_count_blocks(spa_t *spa, zdb_cb_t *zbc)
{
iterate_deleted_livelists(spa, livelist_count_blocks, zbc);
}
static void
dump_livelist_cb(dsl_deadlist_t *ll, void *arg)
{
ASSERT3P(arg, ==, NULL);
global_feature_count[SPA_FEATURE_LIVELIST]++;
dump_blkptr_list(ll, "Deleted Livelist");
dsl_deadlist_iterate(ll, sublivelist_verify_lightweight, NULL);
}
/*
* Print out, register object references to, and increment feature counts for
* livelists that have been destroyed by the user but haven't yet been freed.
*/
static void
deleted_livelists_dump_mos(spa_t *spa)
{
uint64_t zap_obj;
objset_t *mos = spa->spa_meta_objset;
int err = zap_lookup(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, sizeof (uint64_t), 1, &zap_obj);
if (err == ENOENT)
return;
mos_obj_refd(zap_obj);
iterate_deleted_livelists(spa, dump_livelist_cb, NULL);
}
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
bzero(&zcb, sizeof (zcb));
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* When leak detection is enabled we load all space maps as SM_ALLOC
* maps, then traverse the pool claiming each block we discover. If
* the pool is perfectly consistent, the segment trees will be empty
* when we're done. Anything left over is a leak; any block we can't
* claim (because it's not part of any space map) is a double
* allocation, reference to a freed block, or an unclaimed log block.
*
* When leak detection is disabled (-L option) we still traverse the
* pool claiming each block we discover, but we skip opening any space
* maps.
*/
bzero(&zcb, sizeof (zdb_cb_t));
zdb_leak_init(spa, &zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
bpobj_count_block_cb, &zcb, NULL);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
bpobj_count_block_cb, &zcb, NULL);
}
zdb_claim_removing(spa, &zcb);
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
&zcb, NULL));
}
deleted_livelists_count_blocks(spa, &zcb);
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
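/*
 * Sum the allocated space of the normal, special, dedup and embedded
 * log classes; the traversal callback uses this total to estimate
 * the time remaining while reporting progress.
 */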
zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb.zcb_totalasize += metaslab_class_get_alloc(spa_special_class(spa));
zcb.zcb_totalasize += metaslab_class_get_alloc(spa_dedup_class(spa));
zcb.zcb_totalasize +=
metaslab_class_get_alloc(spa_embedded_log_class(spa));
zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
ASSERT0(spa->spa_load_verify_bytes);
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb.zcb_haderrors |= err;
if (zcb.zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
if (zcb.zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
e, (u_longlong_t)zcb.zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
leaks |= zdb_leak_fini(spa, &zcb);
tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc +
metaslab_class_get_alloc(spa_log_class(spa)) +
metaslab_class_get_alloc(spa_embedded_log_class(spa)) +
metaslab_class_get_alloc(spa_special_class(spa)) +
metaslab_class_get_alloc(spa_dedup_class(spa)) +
get_unflushed_alloc_space(spa);
total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
zcb.zcb_removing_size + zcb.zcb_checkpoint_size;
if (total_found == total_alloc && !dump_opt['L']) {
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else if (!dump_opt['L']) {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
leaks = B_TRUE;
}
if (tzb->zb_count == 0)
return (2);
(void) printf("\n");
(void) printf("\t%-16s %14llu\n", "bp count:",
(u_longlong_t)tzb->zb_count);
(void) printf("\t%-16s %14llu\n", "ganged count:",
(longlong_t)tzb->zb_gangs);
(void) printf("\t%-16s %14llu avg: %6llu\n", "bp logical:",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp physical:", (u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\t%-16s %14llu avg: %6llu compression: %6.2f\n",
"bp allocated:", (u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\t%-16s %14llu ref>1: %6llu deduplication: %6.2f\n",
"bp deduped:", (u_longlong_t)zcb.zcb_dedup_asize,
(u_longlong_t)zcb.zcb_dedup_blocks,
(double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
(void) printf("\t%-16s %14llu used: %5.2f%%\n", "Normal class:",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
if (spa_special_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_special_class(spa));
uint64_t space = metaslab_class_get_space(
spa_special_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Special class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_dedup_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_dedup_class(spa));
uint64_t space = metaslab_class_get_space(
spa_dedup_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Dedup class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
if (spa_embedded_log_class(spa)->mc_allocator[0].mca_rotor != NULL) {
uint64_t alloc = metaslab_class_get_alloc(
spa_embedded_log_class(spa));
uint64_t space = metaslab_class_get_space(
spa_embedded_log_class(spa));
(void) printf("\t%-16s %14llu used: %5.2f%%\n",
"Embedded log class", (u_longlong_t)alloc,
100.0 * alloc / space);
}
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb.zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
i, (u_longlong_t)zcb.zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
dump_histogram(zcb.zcb_embedded_histogram[i],
sizeof (zcb.zcb_embedded_histogram[i]) /
sizeof (zcb.zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
if (tzb->zb_ditto_same_ms != 0) {
(void) printf("\tDittoed blocks in same metaslab: %llu\n",
(longlong_t)tzb->zb_ditto_same_ms);
}
for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
if (vim == NULL) {
continue;
}
char mem[32];
zdb_nicenum(vdev_indirect_mapping_size(vim),
mem, sizeof (mem));
(void) printf("\tindirect vdev id %llu has %llu segments "
"(%s in memory)\n",
(longlong_t)vd->vdev_id,
(longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
}
if (dump_opt['b'] >= 2) {
int l, t, level;
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
const char *typename;
/* make sure nicenum has enough space */
CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ);
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
zb = &zcb.zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
zcb.zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
/* Output a table summarizing block sizes in the pool */
if (dump_opt['b'] >= 2) {
dump_size_histograms(&zcb);
}
}
(void) printf("\n");
if (leaks)
return (2);
if (zcb.zcb_haderrors)
return (3);
return (0);
}
typedef struct zdb_ddt_entry {
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
/* ARGSUSED */
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
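/*
 * Convert each unique block's aggregate reference totals into
 * per-reference averages and bucket the entry by its reference
 * count (power of two), mimicking a real DDT histogram.
 */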
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
ddt_stat_t dds;
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
dds.dds_psize = zdde->zdde_ref_psize / refcnt;
dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
dds.dds_ref_blocks = zdde->zdde_ref_blocks;
dds.dds_ref_lsize = zdde->zdde_ref_lsize;
dds.dds_ref_psize = zdde->zdde_ref_psize;
dds.dds_ref_dsize = zdde->zdde_ref_dsize;
ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
&dds, 0);
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_stat(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
static int
verify_device_removal_feature_counts(spa_t *spa)
{
uint64_t dr_feature_refcount = 0;
uint64_t oc_feature_refcount = 0;
uint64_t indirect_vdev_count = 0;
uint64_t precise_vdev_count = 0;
uint64_t obsolete_counts_object_count = 0;
uint64_t obsolete_sm_count = 0;
uint64_t obsolete_counts_count = 0;
uint64_t scip_count = 0;
uint64_t obsolete_bpobj_count = 0;
int ret = 0;
spa_condensing_indirect_phys_t *scip =
&spa->spa_condensing_indirect_phys;
if (scip->scip_next_mapping_object != 0) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
ASSERT(scip->scip_prev_obsolete_sm_object != 0);
ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
(void) printf("Condensing indirect vdev %llu: new mapping "
"object %llu, prev obsolete sm %llu\n",
(u_longlong_t)scip->scip_vdev,
(u_longlong_t)scip->scip_next_mapping_object,
(u_longlong_t)scip->scip_prev_obsolete_sm_object);
if (scip->scip_prev_obsolete_sm_object != 0) {
space_map_t *prev_obsolete_sm = NULL;
VERIFY0(space_map_open(&prev_obsolete_sm,
spa->spa_meta_objset,
scip->scip_prev_obsolete_sm_object,
0, vd->vdev_asize, 0));
dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
(void) printf("\n");
space_map_close(prev_obsolete_sm);
}
scip_count += 2;
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
if (vic->vic_mapping_object != 0) {
ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
vd->vdev_removing);
indirect_vdev_count++;
if (vd->vdev_indirect_mapping->vim_havecounts) {
obsolete_counts_count++;
}
}
boolean_t are_precise;
VERIFY0(vdev_obsolete_counts_are_precise(vd, &are_precise));
if (are_precise) {
ASSERT(vic->vic_mapping_object != 0);
precise_vdev_count++;
}
uint64_t obsolete_sm_object;
VERIFY0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vic->vic_mapping_object != 0);
obsolete_sm_count++;
}
}
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
&dr_feature_refcount);
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
&oc_feature_refcount);
if (dr_feature_refcount != indirect_vdev_count) {
ret = 1;
(void) printf("Number of indirect vdevs (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)indirect_vdev_count,
(u_longlong_t)dr_feature_refcount);
} else {
(void) printf("Verified device_removal feature refcount " \
"of %llu is correct\n",
(u_longlong_t)dr_feature_refcount);
}
if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_OBSOLETE_BPOBJ) == 0) {
obsolete_bpobj_count++;
}
obsolete_counts_object_count = precise_vdev_count;
obsolete_counts_object_count += obsolete_sm_count;
obsolete_counts_object_count += obsolete_counts_count;
obsolete_counts_object_count += scip_count;
obsolete_counts_object_count += obsolete_bpobj_count;
obsolete_counts_object_count += remap_deadlist_count;
if (oc_feature_refcount != obsolete_counts_object_count) {
ret = 1;
(void) printf("Number of obsolete counts objects (%llu) " \
"does not match feature count (%llu)\n",
(u_longlong_t)obsolete_counts_object_count,
(u_longlong_t)oc_feature_refcount);
(void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
"ob:%llu rd:%llu\n",
(u_longlong_t)precise_vdev_count,
(u_longlong_t)obsolete_sm_count,
(u_longlong_t)obsolete_counts_count,
(u_longlong_t)scip_count,
(u_longlong_t)obsolete_bpobj_count,
(u_longlong_t)remap_deadlist_count);
} else {
(void) printf("Verified indirect_refcount feature refcount " \
"of %llu is correct\n",
(u_longlong_t)oc_feature_refcount);
}
return (ret);
}
static void
zdb_set_skip_mmp(char *target)
{
spa_t *spa;
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
}
#define BOGUS_SUFFIX "_CHECKPOINTED_UNIVERSE"
/*
* Import the checkpointed state of the pool specified by the target
* parameter as readonly. The function also accepts a pool config
* as an optional parameter; otherwise it attempts to infer the config
* from the name of the target pool.
*
* Note that the checkpointed state's pool name will be the name of
* the original pool with the above suffix appended to it. In addition,
* if the target is not a pool name (e.g. a path to a dataset) then
* the new_path parameter is populated with the updated path to
* reflect the fact that we are looking into the checkpointed state.
*
* The function returns a newly-allocated copy of the name of the
* pool containing the checkpointed state. When this copy is no
* longer needed it should be freed with free(3C). Same thing
* applies to the new_path parameter if allocated.
*/
static char *
import_checkpointed_state(char *target, nvlist_t *cfg, char **new_path)
{
int error = 0;
char *poolname, *bogus_name = NULL;
boolean_t freecfg = B_FALSE;
/* If the target is not a pool, extract the pool name */
char *path_start = strchr(target, '/');
if (path_start != NULL) {
size_t poolname_len = path_start - target;
poolname = strndup(target, poolname_len);
} else {
poolname = target;
}
if (cfg == NULL) {
zdb_set_skip_mmp(poolname);
error = spa_get_stats(poolname, &cfg, NULL, 0);
if (error != 0) {
fatal("Tried to read config of pool \"%s\" but "
"spa_get_stats() failed with error %d\n",
poolname, error);
}
freecfg = B_TRUE;
}
if (asprintf(&bogus_name, "%s%s", poolname, BOGUS_SUFFIX) == -1)
return (NULL);
fnvlist_add_string(cfg, ZPOOL_CONFIG_POOL_NAME, bogus_name);
error = spa_import(bogus_name, cfg, NULL,
ZFS_IMPORT_MISSING_LOG | ZFS_IMPORT_CHECKPOINT |
ZFS_IMPORT_SKIP_MMP);
if (freecfg)
nvlist_free(cfg);
if (error != 0) {
fatal("Tried to import pool \"%s\" but spa_import() failed "
"with error %d\n", bogus_name, error);
}
if (new_path != NULL && path_start != NULL) {
if (asprintf(new_path, "%s%s", bogus_name, path_start) == -1) {
if (path_start != NULL)
free(poolname);
return (NULL);
}
}
if (target != poolname)
free(poolname);
return (bogus_name);
}
typedef struct verify_checkpoint_sm_entry_cb_arg {
vdev_t *vcsec_vd;
/* the following fields are only used for printing progress */
uint64_t vcsec_entryid;
uint64_t vcsec_num_entries;
} verify_checkpoint_sm_entry_cb_arg_t;
#define ENTRIES_PER_PROGRESS_UPDATE 10000
static int
verify_checkpoint_sm_entry_cb(space_map_entry_t *sme, void *arg)
{
verify_checkpoint_sm_entry_cb_arg_t *vcsec = arg;
vdev_t *vd = vcsec->vcsec_vd;
metaslab_t *ms = vd->vdev_ms[sme->sme_offset >> vd->vdev_ms_shift];
uint64_t end = sme->sme_offset + sme->sme_run;
ASSERT(sme->sme_type == SM_FREE);
if ((vcsec->vcsec_entryid % ENTRIES_PER_PROGRESS_UPDATE) == 0) {
(void) fprintf(stderr,
"\rverifying vdev %llu, space map entry %llu of %llu ...",
(longlong_t)vd->vdev_id,
(longlong_t)vcsec->vcsec_entryid,
(longlong_t)vcsec->vcsec_num_entries);
}
vcsec->vcsec_entryid++;
/*
* See comment in checkpoint_sm_exclude_entry_cb()
*/
VERIFY3U(sme->sme_offset, >=, ms->ms_start);
VERIFY3U(end, <=, ms->ms_start + ms->ms_size);
/*
* The entries in the vdev_checkpoint_sm should be marked as
* allocated in the checkpointed state of the pool, therefore
* their respective ms_allocatable trees should not contain them.
*/
mutex_enter(&ms->ms_lock);
range_tree_verify_not_present(ms->ms_allocatable,
sme->sme_offset, sme->sme_run);
mutex_exit(&ms->ms_lock);
return (0);
}
/*
* Verify that all segments in the vdev_checkpoint_sm are allocated
* according to the checkpoint's ms_sm (i.e. are not in the checkpoint's
* ms_allocatable).
*
* Do so by comparing the checkpoint space maps (vdev_checkpoint_sm) of
* each vdev in the current state of the pool to the metaslab space maps
* (ms_sm) of the checkpointed state of the pool.
*
* Note that the function changes the state of the ms_allocatable
* trees of the current spa_t. The entries of these ms_allocatable
* trees are cleared out and then repopulated with the free
* entries of their respective ms_sm space maps.
*/
static void
verify_checkpoint_vdev_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_FREE);
for (uint64_t c = 0; c < ckpoint_rvd->vdev_children; c++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[c];
vdev_t *current_vd = current_rvd->vdev_child[c];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* Since we don't allow device removal in a pool
* that has a checkpoint, we expect that all removed
* vdevs were removed from the pool before the
* checkpoint.
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
/*
* If the checkpoint space map doesn't exist, then nothing
* here is checkpointed so there's nothing to verify.
*/
if (current_vd->vdev_top_zap == 0 ||
zap_contains(spa_meta_objset(current),
current_vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(current),
current_vd->vdev_top_zap, VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(current),
checkpoint_sm_obj, 0, current_vd->vdev_asize,
current_vd->vdev_ashift));
verify_checkpoint_sm_entry_cb_arg_t vcsec;
vcsec.vcsec_vd = ckpoint_vd;
vcsec.vcsec_entryid = 0;
vcsec.vcsec_num_entries =
space_map_length(checkpoint_sm) / sizeof (uint64_t);
VERIFY0(space_map_iterate(checkpoint_sm,
space_map_length(checkpoint_sm),
verify_checkpoint_sm_entry_cb, &vcsec));
if (dump_opt['m'] > 3)
dump_spacemap(current->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
/*
* If we've added vdevs since we took the checkpoint, ensure
* that their checkpoint space maps are empty.
*/
if (ckpoint_rvd->vdev_children < current_rvd->vdev_children) {
for (uint64_t c = ckpoint_rvd->vdev_children;
c < current_rvd->vdev_children; c++) {
vdev_t *current_vd = current_rvd->vdev_child[c];
VERIFY3P(current_vd->vdev_checkpoint_sm, ==, NULL);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
/*
* Verifies that all space that's allocated in the checkpoint is
* still allocated in the current version, by checking that everything
* in checkpoint's ms_allocatable (which is actually allocated, not
* allocatable/free) is not present in current's ms_allocatable.
*
* Note that the function changes the state of the ms_allocatable
* trees of both spas when called. The entries of all ms_allocatable
* trees are cleared out and then repopulated from their respective
* ms_sm space maps. In the checkpointed state we load the allocated
* entries, and in the current state we load the free entries.
*/
static void
verify_checkpoint_ms_spacemaps(spa_t *checkpoint, spa_t *current)
{
vdev_t *ckpoint_rvd = checkpoint->spa_root_vdev;
vdev_t *current_rvd = current->spa_root_vdev;
load_concrete_ms_allocatable_trees(checkpoint, SM_ALLOC);
load_concrete_ms_allocatable_trees(current, SM_FREE);
for (uint64_t i = 0; i < ckpoint_rvd->vdev_children; i++) {
vdev_t *ckpoint_vd = ckpoint_rvd->vdev_child[i];
vdev_t *current_vd = current_rvd->vdev_child[i];
if (ckpoint_vd->vdev_ops == &vdev_indirect_ops) {
/*
* See comment in verify_checkpoint_vdev_spacemaps()
*/
ASSERT3P(current_vd->vdev_ops, ==, &vdev_indirect_ops);
continue;
}
for (uint64_t m = 0; m < ckpoint_vd->vdev_ms_count; m++) {
metaslab_t *ckpoint_msp = ckpoint_vd->vdev_ms[m];
metaslab_t *current_msp = current_vd->vdev_ms[m];
(void) fprintf(stderr,
"\rverifying vdev %llu of %llu, "
"metaslab %llu of %llu ...",
(longlong_t)current_vd->vdev_id,
(longlong_t)current_rvd->vdev_children,
(longlong_t)current_vd->vdev_ms[m]->ms_id,
(longlong_t)current_vd->vdev_ms_count);
/*
* We walk through the ms_allocatable trees that
* are loaded with the allocated blocks from the
* ms_sm spacemaps of the checkpoint. For each
* one of these ranges we ensure that none of them
* exists in the ms_allocatable trees of the
* current state which are loaded with the ranges
* that are currently free.
*
* This way we ensure that none of the blocks that
* are part of the checkpoint were freed by mistake.
*/
range_tree_walk(ckpoint_msp->ms_allocatable,
(range_tree_func_t *)range_tree_verify_not_present,
current_msp->ms_allocatable);
}
}
/* for cleaner progress output */
(void) fprintf(stderr, "\n");
}
static void
verify_checkpoint_blocks(spa_t *spa)
{
ASSERT(!dump_opt['L']);
spa_t *checkpoint_spa;
char *checkpoint_pool;
int error = 0;
/*
* We import the checkpointed state of the pool (under a different
* name) so we can do verification on it against the current state
* of the pool.
*/
checkpoint_pool = import_checkpointed_state(spa->spa_name, NULL,
NULL);
ASSERT(strcmp(spa->spa_name, checkpoint_pool) != 0);
error = spa_open(checkpoint_pool, &checkpoint_spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but spa_open() failed with "
"error %d\n", checkpoint_pool, error);
}
/*
* Ensure that ranges in the checkpoint space maps of each vdev
* are allocated according to the checkpointed state's metaslab
* space maps.
*/
verify_checkpoint_vdev_spacemaps(checkpoint_spa, spa);
/*
* Ensure that allocated ranges in the checkpoint's metaslab
* space maps remain allocated in the metaslab space maps of
* the current state.
*/
verify_checkpoint_ms_spacemaps(checkpoint_spa, spa);
/*
* Once we are done, we get rid of the checkpointed state.
*/
spa_close(checkpoint_spa, FTAG);
free(checkpoint_pool);
}
static void
dump_leftover_checkpoint_blocks(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
space_map_t *checkpoint_sm = NULL;
uint64_t checkpoint_sm_obj;
if (vd->vdev_top_zap == 0)
continue;
if (zap_contains(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM) != 0)
continue;
VERIFY0(zap_lookup(spa_meta_objset(spa), vd->vdev_top_zap,
VDEV_TOP_ZAP_POOL_CHECKPOINT_SM,
sizeof (uint64_t), 1, &checkpoint_sm_obj));
VERIFY0(space_map_open(&checkpoint_sm, spa_meta_objset(spa),
checkpoint_sm_obj, 0, vd->vdev_asize, vd->vdev_ashift));
dump_spacemap(spa->spa_meta_objset, checkpoint_sm);
space_map_close(checkpoint_sm);
}
}
static int
verify_checkpoint(spa_t *spa)
{
uberblock_t checkpoint;
int error;
if (!spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT))
return (0);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT && !dump_opt['L']) {
/*
* If the feature is active but the uberblock is missing
* then we must be in the middle of discarding the
* checkpoint.
*/
(void) printf("\nPartially discarded checkpoint "
"state found:\n");
if (dump_opt['m'] > 3)
dump_leftover_checkpoint_blocks(spa);
return (0);
} else if (error != 0) {
(void) printf("lookup error %d when looking for "
"checkpointed uberblock in MOS\n", error);
return (error);
}
dump_uberblock(&checkpoint, "\nCheckpointed uberblock found:\n", "\n");
if (checkpoint.ub_checkpoint_txg == 0) {
(void) printf("\nub_checkpoint_txg not set in checkpointed "
"uberblock\n");
error = 3;
}
if (error == 0 && !dump_opt['L'])
verify_checkpoint_blocks(spa);
return (error);
}
/* ARGSUSED */
static void
mos_leaks_cb(void *arg, uint64_t start, uint64_t size)
{
for (uint64_t i = start; i < start + size; i++) {
(void) printf("MOS object %llu referenced but not allocated\n",
(u_longlong_t)i);
}
}
static void
mos_obj_refd(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL)
range_tree_add(mos_refd_objs, obj, 1);
}
/*
* Call on a MOS object that may already have been referenced.
*/
static void
mos_obj_refd_multiple(uint64_t obj)
{
if (obj != 0 && mos_refd_objs != NULL &&
!range_tree_contains(mos_refd_objs, obj, 1))
range_tree_add(mos_refd_objs, obj, 1);
}
static void
mos_leak_vdev_top_zap(vdev_t *vd)
{
uint64_t ms_flush_data_obj;
int error = zap_lookup(spa_meta_objset(vd->vdev_spa),
vd->vdev_top_zap, VDEV_TOP_ZAP_MS_UNFLUSHED_PHYS_TXGS,
sizeof (ms_flush_data_obj), 1, &ms_flush_data_obj);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(ms_flush_data_obj);
}
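/*
 * Recursively mark every MOS object owned by this vdev and its
 * children (DTL, metaslab array, indirect mapping/births objects,
 * per-vdev ZAPs, checkpoint and obsolete space maps, metaslab space
 * maps) as referenced.
 */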
static void
mos_leak_vdev(vdev_t *vd)
{
mos_obj_refd(vd->vdev_dtl_object);
mos_obj_refd(vd->vdev_ms_array);
mos_obj_refd(vd->vdev_indirect_config.vic_births_object);
mos_obj_refd(vd->vdev_indirect_config.vic_mapping_object);
mos_obj_refd(vd->vdev_leaf_zap);
if (vd->vdev_checkpoint_sm != NULL)
mos_obj_refd(vd->vdev_checkpoint_sm->sm_object);
if (vd->vdev_indirect_mapping != NULL) {
mos_obj_refd(vd->vdev_indirect_mapping->
vim_phys->vimp_counts_object);
}
if (vd->vdev_obsolete_sm != NULL)
mos_obj_refd(vd->vdev_obsolete_sm->sm_object);
for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *ms = vd->vdev_ms[m];
mos_obj_refd(space_map_object(ms->ms_sm));
}
if (vd->vdev_top_zap != 0) {
mos_obj_refd(vd->vdev_top_zap);
mos_leak_vdev_top_zap(vd);
}
for (uint64_t c = 0; c < vd->vdev_children; c++) {
mos_leak_vdev(vd->vdev_child[c]);
}
}
static void
mos_leak_log_spacemaps(spa_t *spa)
{
uint64_t spacemap_zap;
int error = zap_lookup(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_LOG_SPACEMAP_ZAP,
sizeof (spacemap_zap), 1, &spacemap_zap);
if (error == ENOENT)
return;
ASSERT0(error);
mos_obj_refd(spacemap_zap);
for (spa_log_sm_t *sls = avl_first(&spa->spa_sm_logs_by_txg);
sls; sls = AVL_NEXT(&spa->spa_sm_logs_by_txg, sls))
mos_obj_refd(sls->sls_sm_obj);
}
static int
dump_mos_leaks(spa_t *spa)
{
int rv = 0;
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
/* Visit and mark all referenced objects in the MOS */
mos_obj_refd(DMU_POOL_DIRECTORY_OBJECT);
mos_obj_refd(spa->spa_pool_props_object);
mos_obj_refd(spa->spa_config_object);
mos_obj_refd(spa->spa_ddt_stat_object);
mos_obj_refd(spa->spa_feat_desc_obj);
mos_obj_refd(spa->spa_feat_enabled_txg_obj);
mos_obj_refd(spa->spa_feat_for_read_obj);
mos_obj_refd(spa->spa_feat_for_write_obj);
mos_obj_refd(spa->spa_history);
mos_obj_refd(spa->spa_errlog_last);
mos_obj_refd(spa->spa_errlog_scrub);
mos_obj_refd(spa->spa_all_vdev_zaps);
mos_obj_refd(spa->spa_dsl_pool->dp_bptree_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_tmp_userrefs_obj);
mos_obj_refd(spa->spa_dsl_pool->dp_scan->scn_phys.scn_queue_obj);
bpobj_count_refd(&spa->spa_deferred_bpobj);
mos_obj_refd(dp->dp_empty_bpobj);
bpobj_count_refd(&dp->dp_obsolete_bpobj);
bpobj_count_refd(&dp->dp_free_bpobj);
mos_obj_refd(spa->spa_l2cache.sav_object);
mos_obj_refd(spa->spa_spares.sav_object);
if (spa->spa_syncing_log_sm != NULL)
mos_obj_refd(spa->spa_syncing_log_sm->sm_object);
mos_leak_log_spacemaps(spa);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_next_mapping_object);
mos_obj_refd(spa->spa_condensing_indirect_phys.
scip_prev_obsolete_sm_object);
if (spa->spa_condensing_indirect_phys.scip_next_mapping_object != 0) {
vdev_indirect_mapping_t *vim =
vdev_indirect_mapping_open(mos,
spa->spa_condensing_indirect_phys.scip_next_mapping_object);
mos_obj_refd(vim->vim_phys->vimp_counts_object);
vdev_indirect_mapping_close(vim);
}
deleted_livelists_dump_mos(spa);
if (dp->dp_origin_snap != NULL) {
dsl_dataset_t *ds;
dsl_pool_config_enter(dp, FTAG);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(dp->dp_origin_snap)->ds_next_snap_obj,
FTAG, &ds));
count_ds_mos_objects(ds);
dump_blkptr_list(&ds->ds_deadlist, "Deadlist");
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
count_ds_mos_objects(dp->dp_origin_snap);
dump_blkptr_list(&dp->dp_origin_snap->ds_deadlist, "Deadlist");
}
count_dir_mos_objects(dp->dp_mos_dir);
if (dp->dp_free_dir != NULL)
count_dir_mos_objects(dp->dp_free_dir);
if (dp->dp_leak_dir != NULL)
count_dir_mos_objects(dp->dp_leak_dir);
mos_leak_vdev(spa->spa_root_vdev);
for (uint64_t class = 0; class < DDT_CLASSES; class++) {
for (uint64_t type = 0; type < DDT_TYPES; type++) {
for (uint64_t cksum = 0;
cksum < ZIO_CHECKSUM_FUNCTIONS; cksum++) {
ddt_t *ddt = spa->spa_ddt[cksum];
mos_obj_refd(ddt->ddt_object[type][class]);
}
}
}
/*
* Visit all allocated objects and make sure they are referenced.
*/
uint64_t object = 0;
while (dmu_object_next(mos, &object, B_FALSE, 0) == 0) {
if (range_tree_contains(mos_refd_objs, object, 1)) {
range_tree_remove(mos_refd_objs, object, 1);
} else {
dmu_object_info_t doi;
const char *name;
dmu_object_info(mos, object, &doi);
if (doi.doi_type & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(doi.doi_type);
name = dmu_ot_byteswap[bswap].ob_name;
} else {
name = dmu_ot[doi.doi_type].ot_name;
}
(void) printf("MOS object %llu (%s) leaked\n",
(u_longlong_t)object, name);
rv = 2;
}
}
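/*
 * Anything still left in mos_refd_objs at this point was referenced
 * but never returned by dmu_object_next(), i.e. referenced but not
 * allocated.
 */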
(void) range_tree_walk(mos_refd_objs, mos_leaks_cb, NULL);
if (!range_tree_is_empty(mos_refd_objs))
rv = 2;
range_tree_vacate(mos_refd_objs, NULL, NULL);
range_tree_destroy(mos_refd_objs);
return (rv);
}
typedef struct log_sm_obsolete_stats_arg {
uint64_t lsos_current_txg;
uint64_t lsos_total_entries;
uint64_t lsos_valid_entries;
uint64_t lsos_sm_entries;
uint64_t lsos_valid_sm_entries;
} log_sm_obsolete_stats_arg_t;
static int
log_spacemap_obsolete_stats_cb(spa_t *spa, space_map_entry_t *sme,
uint64_t txg, void *arg)
{
log_sm_obsolete_stats_arg_t *lsos = arg;
uint64_t offset = sme->sme_offset;
uint64_t vdev_id = sme->sme_vdev;
if (lsos->lsos_current_txg == 0) {
/* this is the first log */
lsos->lsos_current_txg = txg;
} else if (lsos->lsos_current_txg < txg) {
/* we just changed log - print stats and reset */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos->lsos_valid_sm_entries,
(u_longlong_t)lsos->lsos_sm_entries,
(u_longlong_t)lsos->lsos_current_txg);
lsos->lsos_valid_sm_entries = 0;
lsos->lsos_sm_entries = 0;
lsos->lsos_current_txg = txg;
}
ASSERT3U(lsos->lsos_current_txg, ==, txg);
lsos->lsos_sm_entries++;
lsos->lsos_total_entries++;
vdev_t *vd = vdev_lookup_top(spa, vdev_id);
if (!vdev_is_concrete(vd))
return (0);
metaslab_t *ms = vd->vdev_ms[offset >> vd->vdev_ms_shift];
ASSERT(sme->sme_type == SM_ALLOC || sme->sme_type == SM_FREE);
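/*
 * Entries older than the metaslab's unflushed txg have already been
 * flushed into its ms_sm and are therefore obsolete; everything else
 * still counts as valid.
 */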
if (txg < metaslab_unflushed_txg(ms))
return (0);
lsos->lsos_valid_sm_entries++;
lsos->lsos_valid_entries++;
return (0);
}
static void
dump_log_spacemap_obsolete_stats(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return;
log_sm_obsolete_stats_arg_t lsos;
bzero(&lsos, sizeof (lsos));
(void) printf("Log Space Map Obsolete Entry Statistics:\n");
iterate_through_spacemap_logs(spa,
log_spacemap_obsolete_stats_cb, &lsos);
/* print stats for latest log */
(void) printf("%-8llu valid entries out of %-8llu - txg %llu\n",
(u_longlong_t)lsos.lsos_valid_sm_entries,
(u_longlong_t)lsos.lsos_sm_entries,
(u_longlong_t)lsos.lsos_current_txg);
(void) printf("%-8llu valid entries out of %-8llu - total\n\n",
(u_longlong_t)lsos.lsos_valid_entries,
(u_longlong_t)lsos.lsos_total_entries);
}
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['y']) {
livelist_metaslab_validate(spa);
}
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa);
if (dump_opt['d'] > 2 || dump_opt['m']) {
dump_log_spacemaps(spa);
dump_log_spacemap_obsolete_stats(spa);
}
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
mos_refd_objs = range_tree_create(NULL, RANGE_SEG64, NULL, 0,
0);
dump_objset(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
ASSERT(spa_feature_is_enabled(spa,
SPA_FEATURE_DEVICE_REMOVAL));
dump_full_bpobj(&dp->dp_obsolete_bpobj,
"Pool obsolete blocks", 0);
}
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
for (spa_feature_t f = 0; f < SPA_FEATURES; f++)
global_feature_count[f] = UINT64_MAX;
global_feature_count[SPA_FEATURE_REDACTION_BOOKMARKS] = 0;
global_feature_count[SPA_FEATURE_BOOKMARK_WRITTEN] = 0;
global_feature_count[SPA_FEATURE_LIVELIST] = 0;
(void) dmu_objset_find(spa_name(spa), dump_one_objset,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
if (rc == 0 && !dump_opt['L'])
rc = dump_mos_leaks(spa);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
uint64_t *arr;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
if (global_feature_count[f] == UINT64_MAX)
continue;
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(global_feature_count[f]);
continue;
}
arr = global_feature_count;
} else {
if (!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
arr = dataset_feature_count;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (arr[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld consumers != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)arr[f], (longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
if (rc == 0)
rc = verify_device_removal_feature_counts(spa);
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc == 0)
rc = verify_checkpoint(spa);
if (rc != 0) {
dump_debug_buffer();
exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_RAW 0x0020
#define ZDB_FLAG_PRINT_BLKPTR 0x0040
#define ZDB_FLAG_VERBOSE 0x0080
static int flagbits[256];
static char flagbitstr[16];
static void
zdb_print_blkptr(const blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
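/*
 * Hex dump: each line shows the byte offset, two 64-bit words
 * (byteswapped if requested), and the printable ASCII for those
 * 16 bytes.
 */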
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1.
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
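/*
 * Also accept a leaf name given without a trailing "s0" slice
 * suffix (e.g. c1t0d0 matching a path that ends in c1t0d0s0).
 */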
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
static int
name_from_objset_id(spa_t *spa, uint64_t objset_id, char *outstr)
{
dsl_dataset_t *ds;
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
int error = dsl_dataset_hold_obj(spa->spa_dsl_pool, objset_id,
NULL, &ds);
if (error != 0) {
(void) fprintf(stderr, "failed to hold objset %llu: %s\n",
(u_longlong_t)objset_id, strerror(error));
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (error);
}
dsl_dataset_name(ds, outstr);
dsl_dataset_rele(ds, NULL);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
return (0);
}
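/*
 * Parse a "lsize/psize" pair (both hex); if only one size is given
 * it is used for both. Returns B_TRUE only when lsize >= psize > 0.
 */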
static boolean_t
zdb_parse_block_sizes(char *sizes, uint64_t *lsize, uint64_t *psize)
{
char *s0, *s1, *tmp = NULL;
if (sizes == NULL)
return (B_FALSE);
s0 = strtok_r(sizes, "/", &tmp);
if (s0 == NULL)
return (B_FALSE);
s1 = strtok_r(NULL, "/", &tmp);
*lsize = strtoull(s0, NULL, 16);
*psize = s1 ? strtoull(s1, NULL, 16) : *lsize;
return (*lsize >= *psize && *psize > 0);
}
#define ZIO_COMPRESS_MASK(alg) (1ULL << (ZIO_COMPRESS_##alg))
static boolean_t
zdb_decompress_block(abd_t *pabd, void *buf, void *lbuf, uint64_t lsize,
uint64_t psize, int flags)
{
boolean_t exceeded = B_FALSE;
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
int cfuncs[ZIO_COMPRESS_FUNCTIONS] = { 0 };
int *cfuncp = cfuncs;
uint64_t maxlsize = SPA_MAXBLOCKSIZE;
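/*
 * Queue LZ4 and LZJB first since they are the most common; the mask
 * excludes pseudo-algorithms (ON/OFF/INHERIT/EMPTY), optionally ZLE,
 * and the two already queued, so every other real algorithm is
 * appended afterwards.
 */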
uint64_t mask = ZIO_COMPRESS_MASK(ON) | ZIO_COMPRESS_MASK(OFF) |
ZIO_COMPRESS_MASK(INHERIT) | ZIO_COMPRESS_MASK(EMPTY) |
(getenv("ZDB_NO_ZLE") ? ZIO_COMPRESS_MASK(ZLE) : 0);
*cfuncp++ = ZIO_COMPRESS_LZ4;
*cfuncp++ = ZIO_COMPRESS_LZJB;
mask |= ZIO_COMPRESS_MASK(LZ4) | ZIO_COMPRESS_MASK(LZJB);
for (int c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++)
if (((1ULL << c) & mask) == 0)
*cfuncp++ = c;
/*
* On the one hand, with SPA_MAXBLOCKSIZE at 16MB, this
* could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
* info gets old after a while. The user can specify the 'v' flag
* to see the progress.
*/
if (lsize == psize)
lsize += SPA_MINBLOCKSIZE;
else
maxlsize = lsize;
for (; lsize <= maxlsize; lsize += SPA_MINBLOCKSIZE) {
for (cfuncp = cfuncs; *cfuncp; cfuncp++) {
if (flags & ZDB_FLAG_VERBOSE) {
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize,
(u_longlong_t)lsize,
zio_compress_table[*cfuncp].\
ci_name);
}
/*
* We randomize lbuf2, and decompress to both
* lbuf and lbuf2. This way, we will know if
* decompression filled exactly to lsize.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(*cfuncp, pabd,
lbuf, psize, lsize, NULL) == 0 &&
zio_decompress_data(*cfuncp, pabd,
lbuf2, psize, lsize, NULL) == 0 &&
bcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (*cfuncp != 0)
break;
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > maxlsize) {
exceeded = B_TRUE;
}
if (*cfuncp == ZIO_COMPRESS_ZLE) {
printf("\nZLE decompression was selected. If you "
"suspect the results are wrong,\ntry avoiding ZLE "
"by setting and exporting ZDB_NO_ZLE=\"true\"\n");
}
return (exceeded);
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:[lsize/]psize[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* r: Dump raw data to stdout
* v: Verbose
*
*/
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
char *s, *p, *dup, *vdev, *flagstr, *sizes, *tmp = NULL;
int i, error;
boolean_t borrowed = B_FALSE, found = B_FALSE;
dup = strdup(thing);
s = strtok_r(dup, ":", &tmp);
vdev = s ? s : "";
s = strtok_r(NULL, ":", &tmp);
offset = strtoull(s ? s : "", NULL, 16);
sizes = strtok_r(NULL, ":", &tmp);
s = strtok_r(NULL, ":", &tmp);
flagstr = strdup(s ? s : "");
s = NULL;
tmp = NULL;
if (!zdb_parse_block_sizes(sizes, &lsize, &psize))
s = "invalid size(s)";
if (!IS_P2ALIGNED(psize, DEV_BSIZE) || !IS_P2ALIGNED(lsize, DEV_BSIZE))
s = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
s = "offset must be a multiple of sector size";
if (s) {
(void) printf("Invalid block specifier: %s - %s\n", thing, s);
goto done;
}
for (s = strtok_r(flagstr, ":", &tmp);
s != NULL;
s = strtok_r(NULL, ":", &tmp)) {
for (i = 0; i < strlen(flagstr); i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Ignoring flag: %c\n",
(uchar_t)flagstr[i]);
continue;
}
found = B_TRUE;
flags |= bit;
p = &flagstr[i + 1];
if (*p != ':' && *p != '\0') {
int j = 0, nextbit = flagbits[(uchar_t)*p];
char *end, offstr[8] = { 0 };
if ((bit == ZDB_FLAG_PRINT_BLKPTR) &&
(nextbit == 0)) {
/* look ahead to isolate the offset */
while (nextbit == 0 &&
strchr(flagbitstr, *p) == NULL) {
offstr[j] = *p;
j++;
if (i + j > strlen(flagstr))
break;
p++;
nextbit = flagbits[(uchar_t)*p];
}
blkptr_offset = strtoull(offstr, &end,
16);
i += j;
} else if (nextbit == 0) {
(void) printf("***Ignoring flag arg:"
" '%c'\n", (uchar_t)*p);
}
}
}
}
if (blkptr_offset % sizeof (blkptr_t)) {
printf("Block pointer offset 0x%llx "
"must be divisible by 0x%x\n",
(longlong_t)blkptr_offset, (int)sizeof (blkptr_t));
goto done;
}
if (found == B_FALSE && strlen(flagstr) > 0) {
printf("Invalid flag arg: '%s'\n", flagstr);
goto done;
}
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
free(dup);
return;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY | ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
uint64_t orig_lsize = lsize;
buf = lbuf;
if (flags & ZDB_FLAG_DECOMPRESS) {
boolean_t failed = zdb_decompress_block(pabd, buf, lbuf,
lsize, psize, flags);
if (failed) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
} else {
buf = abd_borrow_buf_copy(pabd, lsize);
borrowed = B_TRUE;
}
/*
* Try to detect invalid block pointer. If invalid, try
* decompressing.
*/
if ((flags & ZDB_FLAG_PRINT_BLKPTR || flags & ZDB_FLAG_INDIRECT) &&
!(flags & ZDB_FLAG_DECOMPRESS)) {
const blkptr_t *b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (zfs_blkptr_verify(spa, b, B_FALSE, BLK_VERIFY_ONLY) ==
B_FALSE) {
abd_return_buf_copy(pabd, buf, lsize);
borrowed = B_FALSE;
buf = lbuf;
boolean_t failed = zdb_decompress_block(pabd, buf,
lbuf, lsize, psize, flags);
b = (const blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset);
if (failed || zfs_blkptr_verify(spa, b, B_FALSE,
BLK_VERIFY_LOG) == B_FALSE) {
printf("invalid block pointer at this DVA\n");
goto out;
}
}
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, lsize, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf,
orig_lsize / sizeof (blkptr_t), flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, lsize, flags);
/*
* If :c was specified, iterate through the checksum table to
* calculate and display each checksum for our specified
* DVA and length.
*/
if ((flags & ZDB_FLAG_CHECKSUM) && !(flags & ZDB_FLAG_RAW) &&
!(flags & ZDB_FLAG_GBH)) {
zio_t *czio;
(void) printf("\n");
for (enum zio_checksum ck = ZIO_CHECKSUM_LABEL;
ck < ZIO_CHECKSUM_FUNCTIONS; ck++) {
if ((zio_checksum_table[ck].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED) ||
ck == ZIO_CHECKSUM_NOPARITY) {
continue;
}
BP_SET_CHECKSUM(bp, ck);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
czio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
czio->io_bp = bp;
if (vd == vd->vdev_top) {
zio_nowait(zio_read(czio, spa, bp, pabd, psize,
NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_DONT_RETRY, NULL));
} else {
zio_nowait(zio_vdev_child_io(czio, bp, vd,
offset, pabd, psize, ZIO_TYPE_READ,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW |
ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_OPTIONAL, NULL, NULL));
}
error = zio_wait(czio);
if (error == 0 || error == ECKSUM) {
zio_t *ck_zio = zio_root(spa, NULL, NULL, 0);
ck_zio->io_offset =
DVA_GET_OFFSET(&bp->blk_dva[0]);
ck_zio->io_bp = bp;
zio_checksum_compute(ck_zio, ck, pabd, lsize);
printf("%12s\tcksum=%llx:%llx:%llx:%llx\n",
zio_checksum_table[ck].ci_name,
(u_longlong_t)bp->blk_cksum.zc_word[0],
(u_longlong_t)bp->blk_cksum.zc_word[1],
(u_longlong_t)bp->blk_cksum.zc_word[2],
(u_longlong_t)bp->blk_cksum.zc_word[3]);
zio_wait(ck_zio);
} else {
printf("error %d reading block\n", error);
}
spa_config_exit(spa, SCL_STATE, FTAG);
}
}
if (borrowed)
abd_return_buf_copy(pabd, buf, lsize);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
done:
free(flagstr);
free(dup);
}
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp;
unsigned long long *words = (void *)&bp;
char *buf;
int err;
bzero(&bp, sizeof (bp));
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) fprintf(stderr, "invalid input format\n");
exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
buf = malloc(SPA_MAXBLOCKSIZE);
if (buf == NULL) {
(void) fprintf(stderr, "out of memory\n");
exit(1);
}
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) fprintf(stderr, "decode failed: %u\n", err);
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
free(buf);
}
int
main(int argc, char **argv)
{
int c;
struct rlimit rl = { 1024, 1024 };
spa_t *spa = NULL;
objset_t *os = NULL;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool, dsname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int64_t objset_id = -1;
uint64_t object;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env, *objset_str;
boolean_t target_is_spa = B_TRUE, dataset_lookup = B_FALSE;
nvlist_t *cfg = NULL;
(void) setrlimit(RLIMIT_NOFILE, &rl);
(void) enable_extended_FILE_stdio(-1, -1);
dprintf_setup(&argc, argv);
/*
* If the environment variable SPA_CONFIG_PATH is set, it overrides the
* default spa_config_path setting. If the -U flag is specified, it in
* turn overrides the environment variable setting.
*/
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
/*
* For performance reasons, we set this tunable down. We do so before
* the arg parsing section so that the user can override this value if
* they choose.
*/
zfs_btree_verify_intensity = 3;
while ((c = getopt(argc, argv,
"AbcCdDeEFGhiI:klLmMo:Op:PqrRsSt:uU:vVx:XYyZ")) != -1) {
switch (c) {
case 'b':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'O':
case 'r':
case 'R':
case 's':
case 'S':
case 'u':
case 'y':
case 'Z':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'k':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
case 'Y':
zfs_reconstruct_indirect_combinations_max = INT_MAX;
zfs_deadman_enabled = 0;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight_bytes = strtoull(optarg, NULL, 0);
if (max_inflight_bytes == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight bytes must be greater "
"than 0\n");
usage();
}
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
bcopy(searchdirs, tmp, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
if (dump_opt['d'] || dump_opt['r']) {
/* <pool>[/<dataset | objset id>] is accepted */
if (argv[2] && (objset_str = strchr(argv[2], '/')) != NULL &&
objset_str++ != NULL) {
char *endptr;
errno = 0;
objset_id = strtoull(objset_str, &endptr, 0);
/* dataset 0 is the same as opening the pool */
if (errno == 0 && endptr != objset_str &&
objset_id != 0) {
target_is_spa = B_FALSE;
dataset_lookup = B_TRUE;
} else if (objset_id != 0) {
printf("failed to open objset %s "
"%llu %s", objset_str,
(u_longlong_t)objset_id,
strerror(errno));
exit(1);
}
/* normal dataset name, not an objset ID */
if (endptr == objset_str) {
objset_id = -1;
}
}
}
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_min = zfs_arc_meta_min = 2ULL << SPA_MAXBLOCKSHIFT;
zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;
#endif
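/*
 * Concretely (assuming SPA_MAXBLOCKSHIFT is 24, i.e. 16 MB blocks), the
 * assignments above pin the ARC floor at 32 MB and its ceiling at 256 MB.
 */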
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
/*
* Do not fail spa_load when spa_load_verify fails. This is needed
* to load non-idle pools.
*/
spa_load_verify_dryrun = B_TRUE;
kernel_init(SPA_MODE_READ);
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
if (dump_all && strchr("AeEFklLOPrRSXy", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
libspl_assert_ok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
return (0);
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
return (0);
}
usage();
}
if (dump_opt['l'])
return (dump_label(argv[0]));
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
return (dump_path(argv[0], argv[1], NULL));
}
if (dump_opt['r']) {
if (argc != 3)
usage();
dump_opt['v'] = verbose;
error = dump_path(argv[0], argv[1], &object);
}
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_LOAD_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_LOAD_REWIND_POLICY, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
} else {
target_pool = target;
}
if (dump_opt['e']) {
importargs_t args = { 0 };
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
error = zpool_find_config(NULL, target_pool, &cfg, &args,
&libzpool_config_ops);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_LOAD_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (searchdirs != NULL) {
umem_free(searchdirs, nsearch * sizeof (char *));
searchdirs = NULL;
}
/*
* import_checkpointed_state assumes that the target pool we
* pass it is already part of the spa namespace. Because of
* that, we must always call it after the -e option has been
* processed, which imports the pool into the namespace if it
* is not in the cachefile.
*/
char *checkpoint_pool = NULL;
char *checkpoint_target = NULL;
if (dump_opt['k']) {
checkpoint_pool = import_checkpointed_state(target, cfg,
&checkpoint_target);
if (checkpoint_target != NULL)
target = checkpoint_target;
}
if (cfg != NULL) {
nvlist_free(cfg);
cfg = NULL;
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (dump_opt['k'] && (target_is_spa || dump_opt['R'])) {
ASSERT(checkpoint_pool != NULL);
ASSERT(checkpoint_target == NULL);
error = spa_open(checkpoint_pool, &spa, FTAG);
if (error != 0) {
fatal("Tried to open pool \"%s\" but "
"spa_open() failed with error %d\n",
checkpoint_pool, error);
}
} else if (target_is_spa || dump_opt['R'] || objset_id == 0) {
zdb_set_skip_mmp(target);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else if (strpbrk(target, "#") != NULL) {
dsl_pool_t *dp;
error = dsl_pool_hold(target, FTAG, &dp);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
error = dump_bookmark(dp, target, B_TRUE, verbose > 1);
dsl_pool_rele(dp, FTAG);
if (error != 0) {
fatal("can't dump '%s': %s", target,
strerror(error));
}
return (error);
} else {
zdb_set_skip_mmp(target);
if (dataset_lookup == B_TRUE) {
/*
* Use the supplied id to get the name
* for open_objset.
*/
error = spa_open(target, &spa, FTAG);
if (error == 0) {
error = name_from_objset_id(spa,
objset_id, dsname);
spa_close(spa, FTAG);
if (error == 0)
target = dsname;
}
}
if (error == 0)
error = open_objset(target, FTAG, &os);
if (error == 0)
spa = dmu_objset_spa(os);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (dump_opt['r']) {
error = zdb_copy_object(os, object, argv[1]);
} else if (!dump_opt['R']) {
flagbits['d'] = ZOR_FLAG_DIRECTORY;
flagbits['f'] = ZOR_FLAG_PLAIN_FILE;
flagbits['m'] = ZOR_FLAG_SPACE_MAP;
flagbits['z'] = ZOR_FLAG_ZAP;
flagbits['A'] = ZOR_FLAG_ALL_TYPES;
if (argc > 0 && dump_opt['d']) {
zopt_object_args = argc;
zopt_object_ranges = calloc(zopt_object_args,
sizeof (zopt_object_range_t));
for (unsigned i = 0; i < zopt_object_args; i++) {
int err;
char *msg = NULL;
err = parse_object_range(argv[i],
&zopt_object_ranges[i], &msg);
if (err != 0)
fatal("Bad object or range: '%s': %s\n",
argv[i], msg ? msg : "");
}
} else if (argc > 0 && dump_opt['m']) {
zopt_metaslab_args = argc;
zopt_metaslab = calloc(zopt_metaslab_args,
sizeof (uint64_t));
for (unsigned i = 0; i < zopt_metaslab_args; i++) {
errno = 0;
zopt_metaslab[i] = strtoull(argv[i], NULL, 0);
if (zopt_metaslab[i] == 0 && errno != 0)
fatal("bad number %s: %s", argv[i],
strerror(errno));
}
}
if (os != NULL) {
dump_objset(os);
} else if (zopt_object_args > 0 && !dump_opt['m']) {
dump_objset(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['r'] = ZDB_FLAG_RAW;
flagbits['v'] = ZDB_FLAG_VERBOSE;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (dump_opt['k']) {
free(checkpoint_pool);
if (!target_is_spa)
free(checkpoint_target);
}
if (os != NULL) {
close_objset(os, FTAG);
} else {
spa_close(spa, FTAG);
}
fuid_table_destroy();
dump_debug_buffer();
kernel_fini();
return (error);
}
diff --git a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
index 1563f5d2792c..6c009bdc1235 100644
--- a/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
+++ b/sys/contrib/openzfs/cmd/zed/agents/zfs_retire.c
@@ -1,565 +1,566 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
*
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
*/
/*
* The ZFS retire agent is responsible for managing hot spares across all pools.
* When we see a device fault or a device removal, we try to open the associated
* pool and look for any hot spares. We iterate over any available hot spares
* and attempt a 'zpool replace' for each one.
*
* For vdevs diagnosed as faulty, the agent is also responsible for proactively
* marking the vdev FAULTY (for I/O errors) or DEGRADED (for checksum errors).
*/
#include <sys/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/fs/zfs.h>
#include <libzutil.h>
#include <libzfs.h>
#include <string.h>
+#include <libgen.h>
#include "zfs_agents.h"
#include "fmd_api.h"
typedef struct zfs_retire_repaired {
struct zfs_retire_repaired *zrr_next;
uint64_t zrr_pool;
uint64_t zrr_vdev;
} zfs_retire_repaired_t;
typedef struct zfs_retire_data {
libzfs_handle_t *zrd_hdl;
zfs_retire_repaired_t *zrd_repaired;
} zfs_retire_data_t;
static void
zfs_retire_clear_data(fmd_hdl_t *hdl, zfs_retire_data_t *zdp)
{
zfs_retire_repaired_t *zrp;
while ((zrp = zdp->zrd_repaired) != NULL) {
zdp->zrd_repaired = zrp->zrr_next;
fmd_hdl_free(hdl, zrp, sizeof (zfs_retire_repaired_t));
}
}
/*
* Find a pool with a matching GUID.
*/
typedef struct find_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
nvlist_t *cb_vdev;
} find_cbdata_t;
static int
find_pool(zpool_handle_t *zhp, void *data)
{
find_cbdata_t *cbp = data;
if (cbp->cb_guid ==
zpool_get_prop_int(zhp, ZPOOL_PROP_GUID, NULL)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
/*
* Find a vdev within a tree with a matching GUID.
*/
static nvlist_t *
find_vdev(libzfs_handle_t *zhdl, nvlist_t *nv, uint64_t search_guid)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
nvlist_t *ret;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
guid == search_guid) {
fmd_hdl_debug(fmd_module_hdl("zfs-retire"),
"matched vdev %llu", guid);
return (nv);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = find_vdev(zhdl, child[c], search_guid)) != NULL)
return (ret);
}
return (NULL);
}
/*
* Given a (pool, vdev) GUID pair, find the matching pool and vdev.
*/
static zpool_handle_t *
find_by_guid(libzfs_handle_t *zhdl, uint64_t pool_guid, uint64_t vdev_guid,
nvlist_t **vdevp)
{
find_cbdata_t cb;
zpool_handle_t *zhp;
nvlist_t *config, *nvroot;
/*
* Find the corresponding pool and make sure the vdev still exists.
*/
cb.cb_guid = pool_guid;
if (zpool_iter(zhdl, find_pool, &cb) != 1)
return (NULL);
zhp = cb.cb_zhp;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) != 0) {
zpool_close(zhp);
return (NULL);
}
if (vdev_guid != 0) {
if ((*vdevp = find_vdev(zhdl, nvroot, vdev_guid)) == NULL) {
zpool_close(zhp);
return (NULL);
}
}
return (zhp);
}
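/*
 * Usage sketch: callers pass the pool and vdev GUIDs taken from the
 * FMA event payload, e.g.
 *
 *	nvlist_t *vdev = NULL;
 *	zpool_handle_t *zhp = find_by_guid(zhdl, pool_guid, vdev_guid, &vdev);
 *
 * A vdev_guid of 0 skips the vdev lookup, and the caller is responsible
 * for zpool_close()ing the returned handle.
 */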
/*
* Given a vdev, attempt to replace it with every known spare until one
* succeeds or we run out of devices to try.
* Return whether we were successful or not in replacing the device.
*/
static boolean_t
replace_with_spare(fmd_hdl_t *hdl, zpool_handle_t *zhp, nvlist_t *vdev)
{
nvlist_t *config, *nvroot, *replacement;
nvlist_t **spares;
uint_t s, nspares;
char *dev_name;
zprop_source_t source;
int ashift;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) != 0)
return (B_FALSE);
/*
* Find out if there are any hot spares available in the pool.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) != 0)
return (B_FALSE);
/*
* lookup "ashift" pool property, we may need it for the replacement
*/
ashift = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &source);
replacement = fmd_nvl_alloc(hdl, FMD_SLEEP);
(void) nvlist_add_string(replacement, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT);
dev_name = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
/*
* Try each hot spare in turn, stopping as soon as one
* replacement succeeds.
*/
for (s = 0; s < nspares; s++) {
boolean_t rebuild = B_FALSE;
char *spare_name, *type;
if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
&spare_name) != 0)
continue;
/* prefer sequential resilvering for distributed spares */
if ((nvlist_lookup_string(spares[s], ZPOOL_CONFIG_TYPE,
&type) == 0) && strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0)
rebuild = B_TRUE;
/* if set, add the "ashift" pool property to the spare nvlist */
if (source != ZPROP_SRC_DEFAULT)
(void) nvlist_add_uint64(spares[s],
ZPOOL_CONFIG_ASHIFT, ashift);
(void) nvlist_add_nvlist_array(replacement,
ZPOOL_CONFIG_CHILDREN, &spares[s], 1);
fmd_hdl_debug(hdl, "zpool_vdev_replace '%s' with spare '%s'",
dev_name, zfs_basename(spare_name));
if (zpool_vdev_attach(zhp, dev_name, spare_name,
replacement, B_TRUE, rebuild) == 0) {
free(dev_name);
nvlist_free(replacement);
return (B_TRUE);
}
}
free(dev_name);
nvlist_free(replacement);
return (B_FALSE);
}
/*
* Repair this vdev if we had diagnosed a 'fault.fs.zfs.device' and
* ASRU is now usable. ZFS has found the device to be present and
* functioning.
*/
/*ARGSUSED*/
static void
zfs_vdev_repair(fmd_hdl_t *hdl, nvlist_t *nvl)
{
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
zfs_retire_repaired_t *zrp;
uint64_t pool_guid, vdev_guid;
if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
&pool_guid) != 0 || nvlist_lookup_uint64(nvl,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, &vdev_guid) != 0)
return;
/*
* Before checking the state of the ASRU, go through and see if we've
* already made an attempt to repair this ASRU. This list is cleared
* whenever we receive any kind of list event, and is designed to
* prevent us from generating a feedback loop when we attempt repairs
* against a faulted pool. The problem is that checking the unusable
* state of the ASRU can involve opening the pool, which can post
* statechange events but otherwise leave the pool in the faulted
* state. This list allows us to detect when a statechange event is
* due to our own request.
*/
for (zrp = zdp->zrd_repaired; zrp != NULL; zrp = zrp->zrr_next) {
if (zrp->zrr_pool == pool_guid &&
zrp->zrr_vdev == vdev_guid)
return;
}
zrp = fmd_hdl_alloc(hdl, sizeof (zfs_retire_repaired_t), FMD_SLEEP);
zrp->zrr_next = zdp->zrd_repaired;
zrp->zrr_pool = pool_guid;
zrp->zrr_vdev = vdev_guid;
zdp->zrd_repaired = zrp;
fmd_hdl_debug(hdl, "marking repaired vdev %llu on pool %llu",
vdev_guid, pool_guid);
}
/*ARGSUSED*/
static void
zfs_retire_recv(fmd_hdl_t *hdl, fmd_event_t *ep, nvlist_t *nvl,
const char *class)
{
uint64_t pool_guid, vdev_guid;
zpool_handle_t *zhp;
nvlist_t *resource, *fault;
nvlist_t **faults;
uint_t f, nfaults;
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
libzfs_handle_t *zhdl = zdp->zrd_hdl;
boolean_t fault_device, degrade_device;
boolean_t is_repair;
char *scheme;
nvlist_t *vdev = NULL;
char *uuid;
int repair_done = 0;
boolean_t retire;
boolean_t is_disk;
vdev_aux_t aux;
uint64_t state = 0;
fmd_hdl_debug(hdl, "zfs_retire_recv: '%s'", class);
nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, &state);
/*
* If this is a resource notifying us of device removal then simply
* check for an available spare and continue unless the device is an
* l2arc vdev, in which case we just offline it.
*/
if (strcmp(class, "resource.fs.zfs.removed") == 0 ||
(strcmp(class, "resource.fs.zfs.statechange") == 0 &&
(state == VDEV_STATE_REMOVED || state == VDEV_STATE_FAULTED))) {
char *devtype;
char *devname;
if (nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
&vdev_guid) != 0)
return;
if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
&vdev)) == NULL)
return;
devname = zpool_vdev_name(NULL, zhp, vdev, B_FALSE);
/* Can't replace l2arc with a spare: offline the device */
if (nvlist_lookup_string(nvl, FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
&devtype) == 0 && strcmp(devtype, VDEV_TYPE_L2CACHE) == 0) {
fmd_hdl_debug(hdl, "zpool_vdev_offline '%s'", devname);
zpool_vdev_offline(zhp, devname, B_TRUE);
} else if (!fmd_prop_get_int32(hdl, "spare_on_remove") ||
replace_with_spare(hdl, zhp, vdev) == B_FALSE) {
/* Could not handle with spare */
fmd_hdl_debug(hdl, "no spare for '%s'", devname);
}
free(devname);
zpool_close(zhp);
return;
}
if (strcmp(class, FM_LIST_RESOLVED_CLASS) == 0)
return;
/*
* Note: on Linux, statechange events cover more than just the
* healthy state, so we need to confirm the actual state value.
*/
if (strcmp(class, "resource.fs.zfs.statechange") == 0 &&
state == VDEV_STATE_HEALTHY) {
zfs_vdev_repair(hdl, nvl);
return;
}
if (strcmp(class, "sysevent.fs.zfs.vdev_remove") == 0) {
zfs_vdev_repair(hdl, nvl);
return;
}
zfs_retire_clear_data(hdl, zdp);
if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0)
is_repair = B_TRUE;
else
is_repair = B_FALSE;
/*
* We subscribe to zfs faults as well as all repair events.
*/
if (nvlist_lookup_nvlist_array(nvl, FM_SUSPECT_FAULT_LIST,
&faults, &nfaults) != 0)
return;
for (f = 0; f < nfaults; f++) {
fault = faults[f];
fault_device = B_FALSE;
degrade_device = B_FALSE;
is_disk = B_FALSE;
if (nvlist_lookup_boolean_value(fault, FM_SUSPECT_RETIRE,
&retire) == 0 && retire == 0)
continue;
/*
* While we subscribe to fault.fs.zfs.*, we only take action
* for faults targeting a specific vdev (open failure or SERD
* failure). We also subscribe to fault.io.* events, so that
* faulty disks will be faulted in the ZFS configuration.
*/
if (fmd_nvl_class_match(hdl, fault, "fault.fs.zfs.vdev.io")) {
fault_device = B_TRUE;
} else if (fmd_nvl_class_match(hdl, fault,
"fault.fs.zfs.vdev.checksum")) {
degrade_device = B_TRUE;
} else if (fmd_nvl_class_match(hdl, fault,
"fault.fs.zfs.device")) {
fault_device = B_FALSE;
} else if (fmd_nvl_class_match(hdl, fault, "fault.io.*")) {
is_disk = B_TRUE;
fault_device = B_TRUE;
} else {
continue;
}
if (is_disk) {
continue;
} else {
/*
* This is a ZFS fault. Lookup the resource, and
* attempt to find the matching vdev.
*/
if (nvlist_lookup_nvlist(fault, FM_FAULT_RESOURCE,
&resource) != 0 ||
nvlist_lookup_string(resource, FM_FMRI_SCHEME,
&scheme) != 0)
continue;
if (strcmp(scheme, FM_FMRI_SCHEME_ZFS) != 0)
continue;
if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_POOL,
&pool_guid) != 0)
continue;
if (nvlist_lookup_uint64(resource, FM_FMRI_ZFS_VDEV,
&vdev_guid) != 0) {
if (is_repair)
vdev_guid = 0;
else
continue;
}
if ((zhp = find_by_guid(zhdl, pool_guid, vdev_guid,
&vdev)) == NULL)
continue;
aux = VDEV_AUX_ERR_EXCEEDED;
}
if (vdev_guid == 0) {
/*
* For pool-level repair events, clear the entire pool.
*/
fmd_hdl_debug(hdl, "zpool_clear of pool '%s'",
zpool_get_name(zhp));
(void) zpool_clear(zhp, NULL, NULL);
zpool_close(zhp);
continue;
}
/*
* If this is a repair event, then mark the vdev as repaired and
* continue.
*/
if (is_repair) {
repair_done = 1;
fmd_hdl_debug(hdl, "zpool_clear of pool '%s' vdev %llu",
zpool_get_name(zhp), vdev_guid);
(void) zpool_vdev_clear(zhp, vdev_guid);
zpool_close(zhp);
continue;
}
/*
* Actively fault the device if needed.
*/
if (fault_device)
(void) zpool_vdev_fault(zhp, vdev_guid, aux);
if (degrade_device)
(void) zpool_vdev_degrade(zhp, vdev_guid, aux);
if (fault_device || degrade_device)
fmd_hdl_debug(hdl, "zpool_vdev_%s: vdev %llu on '%s'",
fault_device ? "fault" : "degrade", vdev_guid,
zpool_get_name(zhp));
/*
* Attempt to substitute a hot spare.
*/
(void) replace_with_spare(hdl, zhp, vdev);
zpool_close(zhp);
}
if (strcmp(class, FM_LIST_REPAIRED_CLASS) == 0 && repair_done &&
nvlist_lookup_string(nvl, FM_SUSPECT_UUID, &uuid) == 0)
fmd_case_uuresolved(hdl, uuid);
}
static const fmd_hdl_ops_t fmd_ops = {
zfs_retire_recv, /* fmdo_recv */
NULL, /* fmdo_timeout */
NULL, /* fmdo_close */
NULL, /* fmdo_stats */
NULL, /* fmdo_gc */
};
static const fmd_prop_t fmd_props[] = {
{ "spare_on_remove", FMD_TYPE_BOOL, "true" },
{ NULL, 0, NULL }
};
static const fmd_hdl_info_t fmd_info = {
"ZFS Retire Agent", "1.0", &fmd_ops, fmd_props
};
void
_zfs_retire_init(fmd_hdl_t *hdl)
{
zfs_retire_data_t *zdp;
libzfs_handle_t *zhdl;
if ((zhdl = libzfs_init()) == NULL)
return;
if (fmd_hdl_register(hdl, FMD_API_VERSION, &fmd_info) != 0) {
libzfs_fini(zhdl);
return;
}
zdp = fmd_hdl_zalloc(hdl, sizeof (zfs_retire_data_t), FMD_SLEEP);
zdp->zrd_hdl = zhdl;
fmd_hdl_setspecific(hdl, zdp);
}
void
_zfs_retire_fini(fmd_hdl_t *hdl)
{
zfs_retire_data_t *zdp = fmd_hdl_getspecific(hdl);
if (zdp != NULL) {
zfs_retire_clear_data(hdl, zdp);
libzfs_fini(zdp->zrd_hdl);
fmd_hdl_free(hdl, zdp, sizeof (zfs_retire_data_t));
}
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed.c b/sys/contrib/openzfs/cmd/zed/zed.c
index 0aa03fded468..e45176c00bf2 100644
--- a/sys/contrib/openzfs/cmd/zed/zed.c
+++ b/sys/contrib/openzfs/cmd/zed/zed.c
@@ -1,308 +1,308 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <errno.h>
#include <fcntl.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>
#include "zed.h"
#include "zed_conf.h"
#include "zed_event.h"
#include "zed_file.h"
#include "zed_log.h"
static volatile sig_atomic_t _got_exit = 0;
static volatile sig_atomic_t _got_hup = 0;
/*
* Signal handler for SIGINT & SIGTERM.
*/
static void
_exit_handler(int signum)
{
_got_exit = 1;
}
/*
* Signal handler for SIGHUP.
*/
static void
_hup_handler(int signum)
{
_got_hup = 1;
}
/*
* Register signal handlers.
*/
static void
_setup_sig_handlers(void)
{
struct sigaction sa;
if (sigemptyset(&sa.sa_mask) < 0)
zed_log_die("Failed to initialize sigset");
sa.sa_flags = SA_RESTART;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGPIPE, &sa, NULL) < 0)
zed_log_die("Failed to ignore SIGPIPE");
sa.sa_handler = _exit_handler;
if (sigaction(SIGINT, &sa, NULL) < 0)
zed_log_die("Failed to register SIGINT handler");
if (sigaction(SIGTERM, &sa, NULL) < 0)
zed_log_die("Failed to register SIGTERM handler");
sa.sa_handler = _hup_handler;
if (sigaction(SIGHUP, &sa, NULL) < 0)
zed_log_die("Failed to register SIGHUP handler");
(void) sigaddset(&sa.sa_mask, SIGCHLD);
if (pthread_sigmask(SIG_BLOCK, &sa.sa_mask, NULL) < 0)
zed_log_die("Failed to block SIGCHLD");
}
/*
* Lock all current and future pages in the virtual memory address space.
* Access to locked pages will never be delayed by a page fault.
*
* EAGAIN is tested up to max_tries in case this is a transient error.
*
* Note that memory locks are not inherited by a child created via fork()
* and are automatically removed during an execve(). As such, this must
* be called after the daemon fork()s (when running in the background).
*/
static void
_lock_memory(void)
{
#if HAVE_MLOCKALL
int i = 0;
const int max_tries = 10;
for (i = 0; i < max_tries; i++) {
if (mlockall(MCL_CURRENT | MCL_FUTURE) == 0) {
zed_log_msg(LOG_INFO, "Locked all pages in memory");
return;
}
if (errno != EAGAIN)
break;
}
zed_log_die("Failed to lock memory pages: %s", strerror(errno));
#else /* HAVE_MLOCKALL */
zed_log_die("Failed to lock memory pages: mlockall() not supported");
#endif /* HAVE_MLOCKALL */
}
/*
* Start daemonization of the process including the double fork().
*
* The parent process will block here until _finish_daemonize() is called
* (in the grandchild process), at which point the parent process will exit.
* This prevents the parent process from exiting until initialization is
* complete.
*/
static void
_start_daemonize(void)
{
pid_t pid;
struct sigaction sa;
/* Create pipe for communicating with child during daemonization. */
zed_log_pipe_open();
/* Background process and ensure child is not process group leader. */
pid = fork();
if (pid < 0) {
zed_log_die("Failed to create child process: %s",
strerror(errno));
} else if (pid > 0) {
/* Close writes since parent will only read from pipe. */
zed_log_pipe_close_writes();
/* Wait for notification that daemonization is complete. */
zed_log_pipe_wait();
zed_log_pipe_close_reads();
_exit(EXIT_SUCCESS);
}
/* Close reads since child will only write to pipe. */
zed_log_pipe_close_reads();
/* Create independent session and detach from terminal. */
if (setsid() < 0)
zed_log_die("Failed to create new session: %s",
strerror(errno));
/* Prevent child from terminating on HUP when session leader exits. */
if (sigemptyset(&sa.sa_mask) < 0)
zed_log_die("Failed to initialize sigset");
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGHUP, &sa, NULL) < 0)
zed_log_die("Failed to ignore SIGHUP");
/* Ensure process cannot re-acquire terminal. */
pid = fork();
if (pid < 0) {
zed_log_die("Failed to create grandchild process: %s",
strerror(errno));
} else if (pid > 0) {
_exit(EXIT_SUCCESS);
}
}
/*
* Finish daemonization of the process by closing stdin/stdout/stderr.
*
* This must be called at the end of initialization after all external
* communication channels are established and accessible.
*/
static void
_finish_daemonize(void)
{
int devnull;
/* Preserve fd 0/1/2, but discard data to/from stdin/stdout/stderr. */
devnull = open("/dev/null", O_RDWR);
if (devnull < 0)
zed_log_die("Failed to open /dev/null: %s", strerror(errno));
if (dup2(devnull, STDIN_FILENO) < 0)
zed_log_die("Failed to dup /dev/null onto stdin: %s",
strerror(errno));
if (dup2(devnull, STDOUT_FILENO) < 0)
zed_log_die("Failed to dup /dev/null onto stdout: %s",
strerror(errno));
if (dup2(devnull, STDERR_FILENO) < 0)
zed_log_die("Failed to dup /dev/null onto stderr: %s",
strerror(errno));
if ((devnull > STDERR_FILENO) && (close(devnull) < 0))
zed_log_die("Failed to close /dev/null: %s", strerror(errno));
/* Notify parent that daemonization is complete. */
zed_log_pipe_close_writes();
}
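/*
 * Daemonization handshake (sketch): the original parent blocks in
 * zed_log_pipe_wait() inside _start_daemonize() until the grandchild
 * reaches _finish_daemonize() and closes the write end of the pipe.
 * Only then does the invoking process exit, so a service manager can
 * treat "parent exited" as "daemon fully initialized".
 */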
/*
* ZFS Event Daemon (ZED).
*/
int
main(int argc, char *argv[])
{
struct zed_conf zcp;
uint64_t saved_eid;
int64_t saved_etime[2];
zed_log_init(argv[0]);
zed_log_stderr_open(LOG_NOTICE);
zed_conf_init(&zcp);
zed_conf_parse_opts(&zcp, argc, argv);
if (zcp.do_verbose)
zed_log_stderr_open(LOG_INFO);
if (geteuid() != 0)
zed_log_die("Must be run as root");
zed_file_close_from(STDERR_FILENO + 1);
(void) umask(0);
if (chdir("/") < 0)
zed_log_die("Failed to change to root directory");
if (zed_conf_scan_dir(&zcp) < 0)
exit(EXIT_FAILURE);
if (!zcp.do_foreground) {
_start_daemonize();
zed_log_syslog_open(LOG_DAEMON);
}
_setup_sig_handlers();
if (zcp.do_memlock)
_lock_memory();
if ((zed_conf_write_pid(&zcp) < 0) && (!zcp.do_force))
exit(EXIT_FAILURE);
if (!zcp.do_foreground)
_finish_daemonize();
zed_log_msg(LOG_NOTICE,
"ZFS Event Daemon %s-%s (PID %d)",
ZFS_META_VERSION, ZFS_META_RELEASE, (int)getpid());
if (zed_conf_open_state(&zcp) < 0)
exit(EXIT_FAILURE);
if (zed_conf_read_state(&zcp, &saved_eid, saved_etime) < 0)
exit(EXIT_FAILURE);
idle:
/*
* If -I is specified, attempt to open /dev/zfs repeatedly until
* successful.
*/
do {
if (!zed_event_init(&zcp))
break;
/* Wait for some time and try again. tunable? */
sleep(30);
} while (!_got_exit && zcp.do_idle);
if (_got_exit)
goto out;
zed_event_seek(&zcp, saved_eid, saved_etime);
while (!_got_exit) {
int rv;
if (_got_hup) {
_got_hup = 0;
(void) zed_conf_scan_dir(&zcp);
}
rv = zed_event_service(&zcp);
/* ENODEV: When kernel module is unloaded (osx) */
- if (rv == ENODEV)
+ if (rv != 0)
break;
}
zed_log_msg(LOG_NOTICE, "Exiting");
zed_event_fini(&zcp);
if (zcp.do_idle && !_got_exit)
goto idle;
out:
zed_conf_destroy(&zcp);
zed_log_fini();
exit(EXIT_SUCCESS);
}
diff --git a/sys/contrib/openzfs/cmd/zed/zed_conf.c b/sys/contrib/openzfs/cmd/zed/zed_conf.c
index 2cf2311dbb42..59935102f123 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_conf.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_conf.c
@@ -1,705 +1,706 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include <sys/types.h>
#include <sys/stat.h>
#include <sys/uio.h>
#include <unistd.h>
#include "zed.h"
#include "zed_conf.h"
#include "zed_file.h"
#include "zed_log.h"
#include "zed_strings.h"
/*
* Initialise the configuration with default values.
*/
void
zed_conf_init(struct zed_conf *zcp)
{
memset(zcp, 0, sizeof (*zcp));
/* zcp->zfs_hdl opened in zed_event_init() */
/* zcp->zedlets created in zed_conf_scan_dir() */
zcp->pid_fd = -1; /* opened in zed_conf_write_pid() */
zcp->state_fd = -1; /* opened in zed_conf_open_state() */
zcp->zevent_fd = -1; /* opened in zed_event_init() */
zcp->max_jobs = 16;
if (!(zcp->pid_file = strdup(ZED_PID_FILE)) ||
!(zcp->zedlet_dir = strdup(ZED_ZEDLET_DIR)) ||
!(zcp->state_file = strdup(ZED_STATE_FILE)))
zed_log_die("Failed to create conf: %s", strerror(errno));
}
/*
* Destroy the configuration [zcp].
*
* Note: zfs_hdl & zevent_fd are destroyed via zed_event_fini().
*/
void
zed_conf_destroy(struct zed_conf *zcp)
{
if (zcp->state_fd >= 0) {
if (close(zcp->state_fd) < 0)
zed_log_msg(LOG_WARNING,
"Failed to close state file \"%s\": %s",
zcp->state_file, strerror(errno));
zcp->state_fd = -1;
}
if (zcp->pid_file) {
if ((unlink(zcp->pid_file) < 0) && (errno != ENOENT))
zed_log_msg(LOG_WARNING,
"Failed to remove PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
}
if (zcp->pid_fd >= 0) {
if (close(zcp->pid_fd) < 0)
zed_log_msg(LOG_WARNING,
"Failed to close PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
zcp->pid_fd = -1;
}
if (zcp->pid_file) {
free(zcp->pid_file);
zcp->pid_file = NULL;
}
if (zcp->zedlet_dir) {
free(zcp->zedlet_dir);
zcp->zedlet_dir = NULL;
}
if (zcp->state_file) {
free(zcp->state_file);
zcp->state_file = NULL;
}
if (zcp->zedlets) {
zed_strings_destroy(zcp->zedlets);
zcp->zedlets = NULL;
}
}
/*
* Display command-line help and exit.
*
* If [got_err] is 0, output to stdout and exit normally;
* otherwise, output to stderr and exit with a failure status.
*/
static void
_zed_conf_display_help(const char *prog, boolean_t got_err)
{
struct opt { const char *o, *d, *v; };
FILE *fp = got_err ? stderr : stdout;
struct opt *oo;
struct opt iopts[] = {
{ .o = "-h", .d = "Display help" },
{ .o = "-L", .d = "Display license information" },
{ .o = "-V", .d = "Display version information" },
{},
};
struct opt nopts[] = {
{ .o = "-v", .d = "Be verbose" },
{ .o = "-f", .d = "Force daemon to run" },
{ .o = "-F", .d = "Run daemon in the foreground" },
{ .o = "-I",
.d = "Idle daemon until kernel module is (re)loaded" },
{ .o = "-M", .d = "Lock all pages in memory" },
{ .o = "-P", .d = "$PATH for ZED to use (only used by ZTS)" },
{ .o = "-Z", .d = "Zero state file" },
{},
};
struct opt vopts[] = {
{ .o = "-d DIR", .d = "Read enabled ZEDLETs from DIR.",
.v = ZED_ZEDLET_DIR },
{ .o = "-p FILE", .d = "Write daemon's PID to FILE.",
.v = ZED_PID_FILE },
{ .o = "-s FILE", .d = "Write daemon's state to FILE.",
.v = ZED_STATE_FILE },
{ .o = "-j JOBS", .d = "Start at most JOBS at once.",
.v = "16" },
{},
};
fprintf(fp, "Usage: %s [OPTION]...\n", (prog ? prog : "zed"));
fprintf(fp, "\n");
for (oo = iopts; oo->o; ++oo)
fprintf(fp, " %*s %s\n", -8, oo->o, oo->d);
fprintf(fp, "\n");
for (oo = nopts; oo->o; ++oo)
fprintf(fp, " %*s %s\n", -8, oo->o, oo->d);
fprintf(fp, "\n");
for (oo = vopts; oo->o; ++oo)
fprintf(fp, " %*s %s [%s]\n", -8, oo->o, oo->d, oo->v);
fprintf(fp, "\n");
exit(got_err ? EXIT_FAILURE : EXIT_SUCCESS);
}
/*
* Display license information to stdout and exit.
*/
static void
_zed_conf_display_license(void)
{
printf(
"The ZFS Event Daemon (ZED) is distributed under the terms of the\n"
" Common Development and Distribution License (CDDL-1.0)\n"
" <http://opensource.org/licenses/CDDL-1.0>.\n"
"\n"
"Developed at Lawrence Livermore National Laboratory"
" (LLNL-CODE-403049).\n"
"\n");
exit(EXIT_SUCCESS);
}
/*
* Display version information to stdout and exit.
*/
static void
_zed_conf_display_version(void)
{
printf("%s-%s-%s\n",
ZFS_META_NAME, ZFS_META_VERSION, ZFS_META_RELEASE);
exit(EXIT_SUCCESS);
}
/*
* Copy the [path] string to the [resultp] ptr.
* If [path] is not an absolute path, prefix it with the current working dir.
* If [resultp] is non-null, free its existing string before assignment.
*/
static void
_zed_conf_parse_path(char **resultp, const char *path)
{
char buf[PATH_MAX];
assert(resultp != NULL);
assert(path != NULL);
if (*resultp)
free(*resultp);
if (path[0] == '/') {
*resultp = strdup(path);
} else {
if (!getcwd(buf, sizeof (buf)))
zed_log_die("Failed to get current working dir: %s",
strerror(errno));
if (strlcat(buf, "/", sizeof (buf)) >= sizeof (buf) ||
strlcat(buf, path, sizeof (buf)) >= sizeof (buf))
zed_log_die("Failed to copy path: %s",
strerror(ENAMETOOLONG));
*resultp = strdup(buf);
}
if (!*resultp)
zed_log_die("Failed to copy path: %s", strerror(ENOMEM));
}
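/*
 * For example (hypothetical paths), with a current working directory of
 * "/var/run", a relative argument of "zed.pid" is stored in *resultp as
 * "/var/run/zed.pid", while "/etc/zfs/zed.pid" is copied unchanged.
 */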
/*
* Parse the command-line options into the configuration [zcp].
*/
void
zed_conf_parse_opts(struct zed_conf *zcp, int argc, char **argv)
{
const char * const opts = ":hLVd:p:P:s:vfFMZIj:";
int opt;
unsigned long raw;
if (!zcp || !argv || !argv[0])
zed_log_die("Failed to parse options: Internal error");
opterr = 0; /* suppress default getopt err msgs */
while ((opt = getopt(argc, argv, opts)) != -1) {
switch (opt) {
case 'h':
_zed_conf_display_help(argv[0], B_FALSE);
break;
case 'L':
_zed_conf_display_license();
break;
case 'V':
_zed_conf_display_version();
break;
case 'd':
_zed_conf_parse_path(&zcp->zedlet_dir, optarg);
break;
case 'I':
zcp->do_idle = 1;
break;
case 'p':
_zed_conf_parse_path(&zcp->pid_file, optarg);
break;
case 'P':
_zed_conf_parse_path(&zcp->path, optarg);
break;
case 's':
_zed_conf_parse_path(&zcp->state_file, optarg);
break;
case 'v':
zcp->do_verbose = 1;
break;
case 'f':
zcp->do_force = 1;
break;
case 'F':
zcp->do_foreground = 1;
break;
case 'M':
zcp->do_memlock = 1;
break;
case 'Z':
zcp->do_zero = 1;
break;
case 'j':
errno = 0;
raw = strtoul(optarg, NULL, 0);
if (errno == ERANGE || raw > INT16_MAX) {
zed_log_die("%lu is too many jobs", raw);
} else if (raw == 0) {
zed_log_die("0 jobs makes no sense");
} else {
zcp->max_jobs = raw;
}
break;
case '?':
default:
if (optopt == '?')
_zed_conf_display_help(argv[0], B_FALSE);
fprintf(stderr, "%s: Invalid option '-%c'\n\n",
argv[0], optopt);
_zed_conf_display_help(argv[0], B_TRUE);
break;
}
}
}
/*
* Scan the [zcp] zedlet_dir for files to exec based on the event class.
* Files must be executable by user, but not writable by group or other.
* Dotfiles are ignored.
*
* Return 0 on success with an updated set of zedlets,
* or -1 on error with errno set.
*/
int
zed_conf_scan_dir(struct zed_conf *zcp)
{
zed_strings_t *zedlets;
DIR *dirp;
struct dirent *direntp;
char pathname[PATH_MAX];
struct stat st;
int n;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to scan zedlet dir: %s",
strerror(errno));
return (-1);
}
zedlets = zed_strings_create();
if (!zedlets) {
errno = ENOMEM;
zed_log_msg(LOG_WARNING, "Failed to scan dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
return (-1);
}
dirp = opendir(zcp->zedlet_dir);
if (!dirp) {
int errno_bak = errno;
zed_log_msg(LOG_WARNING, "Failed to open dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
zed_strings_destroy(zedlets);
errno = errno_bak;
return (-1);
}
while ((direntp = readdir(dirp))) {
if (direntp->d_name[0] == '.')
continue;
n = snprintf(pathname, sizeof (pathname),
"%s/%s", zcp->zedlet_dir, direntp->d_name);
if ((n < 0) || (n >= sizeof (pathname))) {
zed_log_msg(LOG_WARNING, "Failed to stat \"%s\": %s",
direntp->d_name, strerror(ENAMETOOLONG));
continue;
}
if (stat(pathname, &st) < 0) {
zed_log_msg(LOG_WARNING, "Failed to stat \"%s\": %s",
pathname, strerror(errno));
continue;
}
if (!S_ISREG(st.st_mode)) {
zed_log_msg(LOG_INFO,
"Ignoring \"%s\": not a regular file",
direntp->d_name);
continue;
}
if ((st.st_uid != 0) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": not owned by root",
direntp->d_name);
continue;
}
if (!(st.st_mode & S_IXUSR)) {
zed_log_msg(LOG_INFO,
"Ignoring \"%s\": not executable by user",
direntp->d_name);
continue;
}
if ((st.st_mode & S_IWGRP) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": writable by group",
direntp->d_name);
continue;
}
if ((st.st_mode & S_IWOTH) && !zcp->do_force) {
zed_log_msg(LOG_NOTICE,
"Ignoring \"%s\": writable by other",
direntp->d_name);
continue;
}
if (zed_strings_add(zedlets, NULL, direntp->d_name) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to register \"%s\": %s",
direntp->d_name, strerror(errno));
continue;
}
if (zcp->do_verbose)
zed_log_msg(LOG_INFO,
"Registered zedlet \"%s\"", direntp->d_name);
}
if (closedir(dirp) < 0) {
int errno_bak = errno;
zed_log_msg(LOG_WARNING, "Failed to close dir \"%s\": %s",
zcp->zedlet_dir, strerror(errno));
zed_strings_destroy(zedlets);
errno = errno_bak;
return (-1);
}
if (zcp->zedlets)
zed_strings_destroy(zcp->zedlets);
zcp->zedlets = zedlets;
return (0);
}
/*
* Write the PID file specified in [zcp].
* Return 0 on success, -1 on error.
*
* This must be called after fork()ing to become a daemon (so the correct PID
* is recorded), but before daemonization is complete and the parent process
* exits (for synchronization with systemd).
*/
int
zed_conf_write_pid(struct zed_conf *zcp)
{
char buf[PATH_MAX];
int n;
char *p;
mode_t mask;
int rv;
if (!zcp || !zcp->pid_file) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to create PID file: %s",
strerror(errno));
return (-1);
}
assert(zcp->pid_fd == -1);
/*
* Create PID file directory if needed.
*/
n = strlcpy(buf, zcp->pid_file, sizeof (buf));
if (n >= sizeof (buf)) {
errno = ENAMETOOLONG;
zed_log_msg(LOG_ERR, "Failed to create PID file: %s",
strerror(errno));
goto err;
}
p = strrchr(buf, '/');
if (p)
*p = '\0';
if ((mkdirp(buf, 0755) < 0) && (errno != EEXIST)) {
zed_log_msg(LOG_ERR, "Failed to create directory \"%s\": %s",
buf, strerror(errno));
goto err;
}
/*
* Obtain PID file lock.
*/
mask = umask(0);
umask(mask | 022);
zcp->pid_fd = open(zcp->pid_file, O_RDWR | O_CREAT | O_CLOEXEC, 0644);
umask(mask);
if (zcp->pid_fd < 0) {
zed_log_msg(LOG_ERR, "Failed to open PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
goto err;
}
rv = zed_file_lock(zcp->pid_fd);
if (rv < 0) {
zed_log_msg(LOG_ERR, "Failed to lock PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
goto err;
} else if (rv > 0) {
pid_t pid = zed_file_is_locked(zcp->pid_fd);
if (pid < 0) {
zed_log_msg(LOG_ERR,
"Failed to test lock on PID file \"%s\"",
zcp->pid_file);
} else if (pid > 0) {
zed_log_msg(LOG_ERR,
"Found PID %d bound to PID file \"%s\"",
pid, zcp->pid_file);
} else {
zed_log_msg(LOG_ERR,
"Inconsistent lock state on PID file \"%s\"",
zcp->pid_file);
}
goto err;
}
/*
* Write PID file.
*/
n = snprintf(buf, sizeof (buf), "%d\n", (int)getpid());
if ((n < 0) || (n >= sizeof (buf))) {
errno = ERANGE;
zed_log_msg(LOG_ERR, "Failed to write PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else if (write(zcp->pid_fd, buf, n) != n) {
zed_log_msg(LOG_ERR, "Failed to write PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else if (fdatasync(zcp->pid_fd) < 0) {
zed_log_msg(LOG_ERR, "Failed to sync PID file \"%s\": %s",
zcp->pid_file, strerror(errno));
} else {
return (0);
}
err:
if (zcp->pid_fd >= 0) {
(void) close(zcp->pid_fd);
zcp->pid_fd = -1;
}
return (-1);
}
/*
* Open and lock the [zcp] state_file.
* Return 0 on success, -1 on error.
*
* FIXME: Move state information into kernel.
*/
int
zed_conf_open_state(struct zed_conf *zcp)
{
char dirbuf[PATH_MAX];
int n;
char *p;
int rv;
if (!zcp || !zcp->state_file) {
errno = EINVAL;
zed_log_msg(LOG_ERR, "Failed to open state file: %s",
strerror(errno));
return (-1);
}
n = strlcpy(dirbuf, zcp->state_file, sizeof (dirbuf));
if (n >= sizeof (dirbuf)) {
errno = ENAMETOOLONG;
zed_log_msg(LOG_WARNING, "Failed to open state file: %s",
strerror(errno));
return (-1);
}
p = strrchr(dirbuf, '/');
if (p)
*p = '\0';
if ((mkdirp(dirbuf, 0755) < 0) && (errno != EEXIST)) {
zed_log_msg(LOG_WARNING,
"Failed to create directory \"%s\": %s",
dirbuf, strerror(errno));
return (-1);
}
if (zcp->state_fd >= 0) {
if (close(zcp->state_fd) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to close state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
}
if (zcp->do_zero)
(void) unlink(zcp->state_file);
zcp->state_fd = open(zcp->state_file,
O_RDWR | O_CREAT | O_CLOEXEC, 0644);
if (zcp->state_fd < 0) {
zed_log_msg(LOG_WARNING, "Failed to open state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
rv = zed_file_lock(zcp->state_fd);
if (rv < 0) {
zed_log_msg(LOG_WARNING, "Failed to lock state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
if (rv > 0) {
pid_t pid = zed_file_is_locked(zcp->state_fd);
if (pid < 0) {
zed_log_msg(LOG_WARNING,
"Failed to test lock on state file \"%s\"",
zcp->state_file);
} else if (pid > 0) {
zed_log_msg(LOG_WARNING,
"Found PID %d bound to state file \"%s\"",
pid, zcp->state_file);
} else {
zed_log_msg(LOG_WARNING,
"Inconsistent lock state on state file \"%s\"",
zcp->state_file);
}
return (-1);
}
return (0);
}
/*
* Read the opened [zcp] state_file to obtain the eid & etime of the last event
* processed. Write the state from the last event to the [eidp] & [etime] args
* passed by reference. Note that etime[] is an array of size 2.
* Return 0 on success, -1 on error.
*/
int
zed_conf_read_state(struct zed_conf *zcp, uint64_t *eidp, int64_t etime[])
{
ssize_t len;
struct iovec iov[3];
ssize_t n;
if (!zcp || !eidp || !etime) {
errno = EINVAL;
zed_log_msg(LOG_ERR,
"Failed to read state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));
return (-1);
}
len = 0;
iov[0].iov_base = eidp;
len += iov[0].iov_len = sizeof (*eidp);
iov[1].iov_base = &etime[0];
len += iov[1].iov_len = sizeof (etime[0]);
iov[2].iov_base = &etime[1];
len += iov[2].iov_len = sizeof (etime[1]);
n = readv(zcp->state_fd, iov, 3);
if (n == 0) {
*eidp = 0;
} else if (n < 0) {
zed_log_msg(LOG_WARNING,
"Failed to read state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
} else if (n != len) {
errno = EIO;
zed_log_msg(LOG_WARNING,
"Failed to read state file \"%s\": Read %d of %d bytes",
zcp->state_file, n, len);
return (-1);
}
return (0);
}
/*
* Write the [eid] & [etime] of the last processed event to the opened
* [zcp] state_file. Note that etime[] is an array of size 2.
* Return 0 on success, -1 on error.
*/
int
zed_conf_write_state(struct zed_conf *zcp, uint64_t eid, int64_t etime[])
{
ssize_t len;
struct iovec iov[3];
ssize_t n;
if (!zcp) {
errno = EINVAL;
zed_log_msg(LOG_ERR,
"Failed to write state file: %s", strerror(errno));
return (-1);
}
if (lseek(zcp->state_fd, 0, SEEK_SET) == (off_t)-1) {
zed_log_msg(LOG_WARNING,
"Failed to reposition state file offset: %s",
strerror(errno));
return (-1);
}
len = 0;
iov[0].iov_base = &eid;
len += iov[0].iov_len = sizeof (eid);
iov[1].iov_base = &etime[0];
len += iov[1].iov_len = sizeof (etime[0]);
iov[2].iov_base = &etime[1];
len += iov[2].iov_len = sizeof (etime[1]);
n = writev(zcp->state_fd, iov, 3);
if (n < 0) {
zed_log_msg(LOG_WARNING,
"Failed to write state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
if (n != len) {
errno = EIO;
zed_log_msg(LOG_WARNING,
"Failed to write state file \"%s\": Wrote %d of %d bytes",
zcp->state_file, n, len);
return (-1);
}
if (fdatasync(zcp->state_fd) < 0) {
zed_log_msg(LOG_WARNING,
"Failed to sync state file \"%s\": %s",
zcp->state_file, strerror(errno));
return (-1);
}
return (0);
}
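/*
 * The state file is therefore a fixed 24-byte record: the 64-bit event
 * id followed by the two 64-bit words of the event time, written and
 * read back with a single writev()/readv().
 */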
diff --git a/sys/contrib/openzfs/cmd/zed/zed_exec.c b/sys/contrib/openzfs/cmd/zed/zed_exec.c
index 1eecfa0a92c4..03dcd03aceb7 100644
--- a/sys/contrib/openzfs/cmd/zed/zed_exec.c
+++ b/sys/contrib/openzfs/cmd/zed/zed_exec.c
@@ -1,368 +1,370 @@
/*
* This file is part of the ZFS Event Daemon (ZED).
*
* Developed at Lawrence Livermore National Laboratory (LLNL-CODE-403049).
* Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC.
* Refer to the OpenZFS git commit log for authoritative copyright attribution.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <stddef.h>
#include <sys/avl.h>
#include <sys/resource.h>
#include <sys/stat.h>
#include <sys/wait.h>
#include <time.h>
#include <unistd.h>
#include <pthread.h>
+#include <signal.h>
+
#include "zed_exec.h"
#include "zed_log.h"
#include "zed_strings.h"
#define ZEVENT_FILENO 3
struct launched_process_node {
avl_node_t node;
pid_t pid;
uint64_t eid;
char *name;
};
static int
_launched_process_node_compare(const void *x1, const void *x2)
{
pid_t p1;
pid_t p2;
assert(x1 != NULL);
assert(x2 != NULL);
p1 = ((const struct launched_process_node *) x1)->pid;
p2 = ((const struct launched_process_node *) x2)->pid;
if (p1 < p2)
return (-1);
else if (p1 == p2)
return (0);
else
return (1);
}
static pthread_t _reap_children_tid = (pthread_t)-1;
static volatile boolean_t _reap_children_stop;
static avl_tree_t _launched_processes;
static pthread_mutex_t _launched_processes_lock = PTHREAD_MUTEX_INITIALIZER;
static int16_t _launched_processes_limit;
/*
* Create an environment string array for passing to execve() using the
* NAME=VALUE strings in container [zsp].
* Return a newly-allocated environment, or NULL on error.
*/
static char **
_zed_exec_create_env(zed_strings_t *zsp)
{
int num_ptrs;
int buflen;
char *buf;
char **pp;
char *p;
const char *q;
int i;
int len;
num_ptrs = zed_strings_count(zsp) + 1;
buflen = num_ptrs * sizeof (char *);
for (q = zed_strings_first(zsp); q; q = zed_strings_next(zsp))
buflen += strlen(q) + 1;
buf = calloc(1, buflen);
if (!buf)
return (NULL);
pp = (char **)buf;
p = buf + (num_ptrs * sizeof (char *));
i = 0;
for (q = zed_strings_first(zsp); q; q = zed_strings_next(zsp)) {
pp[i] = p;
len = strlen(q) + 1;
memcpy(p, q, len);
p += len;
i++;
}
pp[i] = NULL;
assert(buf + buflen == p);
return ((char **)buf);
}
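_zed_exec_create_env() packs the whole environment into one calloc() block: a NULL-terminated char *[] vector up front, with the NAME=VALUE strings copied immediately after it, so a single free() releases everything. A minimal sketch of that layout (illustrative names, not ZED code):

/*
 * Illustrative sketch (not ZED code): build an execve()-style environment
 * in a single allocation -- pointer vector first, string data packed right
 * after it -- mirroring the layout _zed_exec_create_env() produces.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char **
make_env(const char *const *src, int count)
{
        size_t buflen = (count + 1) * sizeof (char *);
        for (int i = 0; i < count; i++)
                buflen += strlen(src[i]) + 1;

        char *buf = calloc(1, buflen);
        if (buf == NULL)
                return (NULL);

        char **pp = (char **)buf;
        char *p = buf + (count + 1) * sizeof (char *);
        for (int i = 0; i < count; i++) {
                size_t len = strlen(src[i]) + 1;
                pp[i] = p;
                memcpy(p, src[i], len);
                p += len;
        }
        pp[count] = NULL;       /* execve() expects a NULL terminator */
        return (pp);
}

int
main(void)
{
        const char *const vars[] =
            { "ZEVENT_EID=17", "ZEVENT_CLASS=ereport.fs.zfs.io" };
        char **env = make_env(vars, 2);

        for (int i = 0; env != NULL && env[i] != NULL; i++)
                (void) printf("%s\n", env[i]);
        free(env);      /* one free() releases the vector and the strings */
        return (0);
}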
/*
* Fork a child process to handle event [eid]. The program [prog]
* in directory [dir] is executed with the environment [env].
*
* The file descriptor [zfd] is the zevent_fd used to track the
* current cursor location within the zevent nvlist.
*/
static void
_zed_exec_fork_child(uint64_t eid, const char *dir, const char *prog,
char *env[], int zfd, boolean_t in_foreground)
{
char path[PATH_MAX];
int n;
pid_t pid;
int fd;
struct launched_process_node *node;
sigset_t mask;
struct timespec launch_timeout =
{ .tv_sec = 0, .tv_nsec = 200 * 1000 * 1000, };
assert(dir != NULL);
assert(prog != NULL);
assert(env != NULL);
assert(zfd >= 0);
while (__atomic_load_n(&_launched_processes_limit,
__ATOMIC_SEQ_CST) <= 0)
(void) nanosleep(&launch_timeout, NULL);
n = snprintf(path, sizeof (path), "%s/%s", dir, prog);
if ((n < 0) || (n >= sizeof (path))) {
zed_log_msg(LOG_WARNING,
"Failed to fork \"%s\" for eid=%llu: %s",
prog, eid, strerror(ENAMETOOLONG));
return;
}
(void) pthread_mutex_lock(&_launched_processes_lock);
pid = fork();
if (pid < 0) {
(void) pthread_mutex_unlock(&_launched_processes_lock);
zed_log_msg(LOG_WARNING,
"Failed to fork \"%s\" for eid=%llu: %s",
prog, eid, strerror(errno));
return;
} else if (pid == 0) {
(void) sigemptyset(&mask);
(void) sigprocmask(SIG_SETMASK, &mask, NULL);
(void) umask(022);
if (in_foreground && /* we're already devnulled if daemonised */
(fd = open("/dev/null", O_RDWR | O_CLOEXEC)) != -1) {
(void) dup2(fd, STDIN_FILENO);
(void) dup2(fd, STDOUT_FILENO);
(void) dup2(fd, STDERR_FILENO);
}
(void) dup2(zfd, ZEVENT_FILENO);
execle(path, prog, NULL, env);
_exit(127);
}
/* parent process */
node = calloc(1, sizeof (*node));
if (node) {
node->pid = pid;
node->eid = eid;
node->name = strdup(prog);
avl_add(&_launched_processes, node);
}
(void) pthread_mutex_unlock(&_launched_processes_lock);
__atomic_sub_fetch(&_launched_processes_limit, 1, __ATOMIC_SEQ_CST);
zed_log_msg(LOG_INFO, "Invoking \"%s\" eid=%llu pid=%d",
prog, eid, pid);
}
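The launch throttle above is a 16-bit counter manipulated with GCC __atomic builtins: _zed_exec_fork_child() polls until the counter is positive and then decrements it, and the reaper thread increments it again after wait4(). A self-contained sketch of that pattern (compile with -pthread; the limit of 4 and the worker body are invented for the example):

/*
 * Illustrative sketch (not ZED code): the __atomic counter pattern used
 * above to cap concurrently running zedlets.  The limit of 4 and the
 * worker body are invented for the example.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static int16_t slots = 4;       /* analogous to _launched_processes_limit */

static void *
worker(void *arg)
{
        struct timespec wait = { .tv_sec = 0, .tv_nsec = 200 * 1000 * 1000 };

        /* Poll until a slot is free, then take it (as the launcher does). */
        while (__atomic_load_n(&slots, __ATOMIC_SEQ_CST) <= 0)
                (void) nanosleep(&wait, NULL);
        __atomic_sub_fetch(&slots, 1, __ATOMIC_SEQ_CST);

        /* ... the fork()/execle() of a zedlet would happen here ... */

        /* Return the slot, as the reaper thread does after wait4(). */
        __atomic_add_fetch(&slots, 1, __ATOMIC_SEQ_CST);
        return (arg);
}

int
main(void)
{
        pthread_t tids[8];

        for (int i = 0; i < 8; i++)
                (void) pthread_create(&tids[i], NULL, worker, NULL);
        for (int i = 0; i < 8; i++)
                (void) pthread_join(tids[i], NULL);
        (void) printf("slots free again: %d\n", (int)slots);
        return (0);
}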
static void
_nop(int sig)
{}
static void *
_reap_children(void *arg)
{
struct launched_process_node node, *pnode;
pid_t pid;
int status;
struct rusage usage;
struct sigaction sa = {};
(void) sigfillset(&sa.sa_mask);
(void) sigdelset(&sa.sa_mask, SIGCHLD);
(void) pthread_sigmask(SIG_SETMASK, &sa.sa_mask, NULL);
(void) sigemptyset(&sa.sa_mask);
sa.sa_handler = _nop;
sa.sa_flags = SA_NOCLDSTOP;
(void) sigaction(SIGCHLD, &sa, NULL);
for (_reap_children_stop = B_FALSE; !_reap_children_stop; ) {
(void) pthread_mutex_lock(&_launched_processes_lock);
pid = wait4(0, &status, WNOHANG, &usage);
if (pid == 0 || pid == (pid_t)-1) {
(void) pthread_mutex_unlock(&_launched_processes_lock);
if (pid == 0 || errno == ECHILD)
pause();
else if (errno != EINTR)
zed_log_msg(LOG_WARNING,
"Failed to wait for children: %s",
strerror(errno));
} else {
memset(&node, 0, sizeof (node));
node.pid = pid;
pnode = avl_find(&_launched_processes, &node, NULL);
if (pnode) {
memcpy(&node, pnode, sizeof (node));
avl_remove(&_launched_processes, pnode);
free(pnode);
}
(void) pthread_mutex_unlock(&_launched_processes_lock);
__atomic_add_fetch(&_launched_processes_limit, 1,
__ATOMIC_SEQ_CST);
usage.ru_utime.tv_sec += usage.ru_stime.tv_sec;
usage.ru_utime.tv_usec += usage.ru_stime.tv_usec;
usage.ru_utime.tv_sec +=
usage.ru_utime.tv_usec / (1000 * 1000);
usage.ru_utime.tv_usec %= 1000 * 1000;
if (WIFEXITED(status)) {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us exit=%d",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
WEXITSTATUS(status));
} else if (WIFSIGNALED(status)) {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us sig=%d/%s",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
WTERMSIG(status),
strsignal(WTERMSIG(status)));
} else {
zed_log_msg(LOG_INFO,
"Finished \"%s\" eid=%llu pid=%d "
"time=%llu.%06us status=0x%X",
node.name, node.eid, pid,
(unsigned long long) usage.ru_utime.tv_sec,
(unsigned int) usage.ru_utime.tv_usec,
(unsigned int) status);
}
free(node.name);
}
}
return (NULL);
}
void
zed_exec_fini(void)
{
struct launched_process_node *node;
void *ck = NULL;
if (_reap_children_tid == (pthread_t)-1)
return;
_reap_children_stop = B_TRUE;
(void) pthread_kill(_reap_children_tid, SIGCHLD);
(void) pthread_join(_reap_children_tid, NULL);
while ((node = avl_destroy_nodes(&_launched_processes, &ck)) != NULL) {
free(node->name);
free(node);
}
avl_destroy(&_launched_processes);
(void) pthread_mutex_destroy(&_launched_processes_lock);
(void) pthread_mutex_init(&_launched_processes_lock, NULL);
_reap_children_tid = (pthread_t)-1;
}
/*
* Process the event [eid] by synchronously invoking all zedlets with a
* matching class prefix.
*
* Each executable in [zcp->zedlets] from the directory [zcp->zedlet_dir]
* is matched against the event's [class], [subclass], and the "all" class
* (which matches all events).
* Every zedlet with a matching class prefix is invoked.
* The NAME=VALUE strings in [envs] will be passed to the zedlet as
* environment variables.
*
* The file descriptor [zcp->zevent_fd] is the zevent_fd used to track the
* current cursor location within the zevent nvlist.
*
* Return 0 on success, -1 on error.
*/
int
zed_exec_process(uint64_t eid, const char *class, const char *subclass,
struct zed_conf *zcp, zed_strings_t *envs)
{
const char *class_strings[4];
const char *allclass = "all";
const char **csp;
const char *z;
char **e;
int n;
if (!zcp->zedlet_dir || !zcp->zedlets || !envs || zcp->zevent_fd < 0)
return (-1);
if (_reap_children_tid == (pthread_t)-1) {
_launched_processes_limit = zcp->max_jobs;
if (pthread_create(&_reap_children_tid, NULL,
_reap_children, NULL) != 0)
return (-1);
pthread_setname_np(_reap_children_tid, "reap ZEDLETs");
avl_create(&_launched_processes, _launched_process_node_compare,
sizeof (struct launched_process_node),
offsetof(struct launched_process_node, node));
}
csp = class_strings;
if (class)
*csp++ = class;
if (subclass)
*csp++ = subclass;
if (allclass)
*csp++ = allclass;
*csp = NULL;
e = _zed_exec_create_env(envs);
for (z = zed_strings_first(zcp->zedlets); z;
z = zed_strings_next(zcp->zedlets)) {
for (csp = class_strings; *csp; csp++) {
n = strlen(*csp);
if ((strncmp(z, *csp, n) == 0) && !isalpha(z[n]))
_zed_exec_fork_child(eid, zcp->zedlet_dir,
z, e, zcp->zevent_fd, zcp->do_foreground);
}
}
free(e);
return (0);
}
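A zedlet matches an event when its file name starts with the class (or subclass, or "all") followed by a non-letter, which is exactly what the strncmp()/isalpha() test above implements. A small illustrative sketch, with invented zedlet names:

/*
 * Illustrative sketch (not ZED code): the prefix test used above to decide
 * whether a zedlet file name matches an event class.  File names invented.
 */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static int
zedlet_matches(const char *zedlet, const char *class)
{
        size_t n = strlen(class);

        /* "<class>" must be followed by a non-letter, e.g. '-' or '.'. */
        return (strncmp(zedlet, class, n) == 0 &&
            !isalpha((unsigned char)zedlet[n]));
}

int
main(void)
{
        (void) printf("%d\n", zedlet_matches("statechange-led.sh", "statechange"));
        (void) printf("%d\n", zedlet_matches("all-syslog.sh", "all"));
        (void) printf("%d\n", zedlet_matches("statechanged.sh", "statechange"));
        return (0);
}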
diff --git a/sys/contrib/openzfs/cmd/zfs/zfs_main.c b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
index 1a49d44f6086..d05cb29c69d6 100644
--- a/sys/contrib/openzfs/cmd/zfs/zfs_main.c
+++ b/sys/contrib/openzfs/cmd/zfs/zfs_main.c
@@ -1,8792 +1,8793 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2012 Milan Jurik. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright 2019 Joyent, Inc.
* Copyright (c) 2019, 2020 by Christian Schwarz. All rights reserved.
*/
#include <assert.h>
#include <ctype.h>
#include <sys/debug.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <libnvpair.h>
#include <locale.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <fcntl.h>
#include <zone.h>
#include <grp.h>
#include <pwd.h>
#include <umem.h>
#include <pthread.h>
#include <signal.h>
#include <sys/list.h>
#include <sys/mkdev.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/fs/zfs.h>
#include <sys/systeminfo.h>
#include <sys/types.h>
#include <time.h>
#include <sys/zfs_project.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <zfs_prop.h>
#include <zfs_deleg.h>
#include <libzutil.h>
#ifdef HAVE_IDMAP
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include "zfs_iter.h"
#include "zfs_util.h"
#include "zfs_comutil.h"
#include "zfs_projectutil.h"
libzfs_handle_t *g_zfs;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static int zfs_do_clone(int argc, char **argv);
static int zfs_do_create(int argc, char **argv);
static int zfs_do_destroy(int argc, char **argv);
static int zfs_do_get(int argc, char **argv);
static int zfs_do_inherit(int argc, char **argv);
static int zfs_do_list(int argc, char **argv);
static int zfs_do_mount(int argc, char **argv);
static int zfs_do_rename(int argc, char **argv);
static int zfs_do_rollback(int argc, char **argv);
static int zfs_do_set(int argc, char **argv);
static int zfs_do_upgrade(int argc, char **argv);
static int zfs_do_snapshot(int argc, char **argv);
static int zfs_do_unmount(int argc, char **argv);
static int zfs_do_share(int argc, char **argv);
static int zfs_do_unshare(int argc, char **argv);
static int zfs_do_send(int argc, char **argv);
static int zfs_do_receive(int argc, char **argv);
static int zfs_do_promote(int argc, char **argv);
static int zfs_do_userspace(int argc, char **argv);
static int zfs_do_allow(int argc, char **argv);
static int zfs_do_unallow(int argc, char **argv);
static int zfs_do_hold(int argc, char **argv);
static int zfs_do_holds(int argc, char **argv);
static int zfs_do_release(int argc, char **argv);
static int zfs_do_diff(int argc, char **argv);
static int zfs_do_bookmark(int argc, char **argv);
static int zfs_do_channel_program(int argc, char **argv);
static int zfs_do_load_key(int argc, char **argv);
static int zfs_do_unload_key(int argc, char **argv);
static int zfs_do_change_key(int argc, char **argv);
static int zfs_do_project(int argc, char **argv);
static int zfs_do_version(int argc, char **argv);
static int zfs_do_redact(int argc, char **argv);
static int zfs_do_wait(int argc, char **argv);
#ifdef __FreeBSD__
static int zfs_do_jail(int argc, char **argv);
static int zfs_do_unjail(int argc, char **argv);
#endif
/*
* Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_CLONE,
HELP_CREATE,
HELP_DESTROY,
HELP_GET,
HELP_INHERIT,
HELP_UPGRADE,
HELP_LIST,
HELP_MOUNT,
HELP_PROMOTE,
HELP_RECEIVE,
HELP_RENAME,
HELP_ROLLBACK,
HELP_SEND,
HELP_SET,
HELP_SHARE,
HELP_SNAPSHOT,
HELP_UNMOUNT,
HELP_UNSHARE,
HELP_ALLOW,
HELP_UNALLOW,
HELP_USERSPACE,
HELP_GROUPSPACE,
HELP_PROJECTSPACE,
HELP_PROJECT,
HELP_HOLD,
HELP_HOLDS,
HELP_RELEASE,
HELP_DIFF,
HELP_BOOKMARK,
HELP_CHANNEL_PROGRAM,
HELP_LOAD_KEY,
HELP_UNLOAD_KEY,
HELP_CHANGE_KEY,
HELP_VERSION,
HELP_REDACT,
HELP_JAIL,
HELP_UNJAIL,
HELP_WAIT,
} zfs_help_t;
typedef struct zfs_command {
const char *name;
int (*func)(int argc, char **argv);
zfs_help_t usage;
} zfs_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zfs_command_t command_table[] = {
{ "version", zfs_do_version, HELP_VERSION },
{ NULL },
{ "create", zfs_do_create, HELP_CREATE },
{ "destroy", zfs_do_destroy, HELP_DESTROY },
{ NULL },
{ "snapshot", zfs_do_snapshot, HELP_SNAPSHOT },
{ "rollback", zfs_do_rollback, HELP_ROLLBACK },
{ "clone", zfs_do_clone, HELP_CLONE },
{ "promote", zfs_do_promote, HELP_PROMOTE },
{ "rename", zfs_do_rename, HELP_RENAME },
{ "bookmark", zfs_do_bookmark, HELP_BOOKMARK },
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
{ NULL },
{ "list", zfs_do_list, HELP_LIST },
{ NULL },
{ "set", zfs_do_set, HELP_SET },
{ "get", zfs_do_get, HELP_GET },
{ "inherit", zfs_do_inherit, HELP_INHERIT },
{ "upgrade", zfs_do_upgrade, HELP_UPGRADE },
{ NULL },
{ "userspace", zfs_do_userspace, HELP_USERSPACE },
{ "groupspace", zfs_do_userspace, HELP_GROUPSPACE },
{ "projectspace", zfs_do_userspace, HELP_PROJECTSPACE },
{ NULL },
{ "project", zfs_do_project, HELP_PROJECT },
{ NULL },
{ "mount", zfs_do_mount, HELP_MOUNT },
{ "unmount", zfs_do_unmount, HELP_UNMOUNT },
{ "share", zfs_do_share, HELP_SHARE },
{ "unshare", zfs_do_unshare, HELP_UNSHARE },
{ NULL },
{ "send", zfs_do_send, HELP_SEND },
{ "receive", zfs_do_receive, HELP_RECEIVE },
{ NULL },
{ "allow", zfs_do_allow, HELP_ALLOW },
{ NULL },
{ "unallow", zfs_do_unallow, HELP_UNALLOW },
{ NULL },
{ "hold", zfs_do_hold, HELP_HOLD },
{ "holds", zfs_do_holds, HELP_HOLDS },
{ "release", zfs_do_release, HELP_RELEASE },
{ "diff", zfs_do_diff, HELP_DIFF },
{ "load-key", zfs_do_load_key, HELP_LOAD_KEY },
{ "unload-key", zfs_do_unload_key, HELP_UNLOAD_KEY },
{ "change-key", zfs_do_change_key, HELP_CHANGE_KEY },
{ "redact", zfs_do_redact, HELP_REDACT },
{ "wait", zfs_do_wait, HELP_WAIT },
#ifdef __FreeBSD__
{ "jail", zfs_do_jail, HELP_JAIL },
{ "unjail", zfs_do_unjail, HELP_UNJAIL },
#endif
};
#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0]))
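The sub-command is resolved by scanning this table for a matching name and calling its handler; the NULL entries exist only to produce blank lines in the usage text. A minimal sketch of the same table-driven dispatch, with invented commands (not zfs(8) code):

/*
 * Illustrative sketch (not zfs(8) code): table-driven sub-command dispatch.
 * Commands are invented; a NULL name only affects usage formatting.
 */
#include <stdio.h>
#include <string.h>

typedef struct cmd {
        const char *name;
        int (*func)(int, char **);
} cmd_t;

static int
do_hello(int argc, char **argv)
{
        (void) argc; (void) argv;
        (void) puts("hello");
        return (0);
}

static cmd_t table[] = {
        { "hello", do_hello },
        { NULL },               /* blank line in the usage text */
};

int
main(int argc, char **argv)
{
        if (argc < 2)
                return (2);
        for (size_t i = 0; i < sizeof (table) / sizeof (table[0]); i++) {
                if (table[i].name != NULL &&
                    strcmp(argv[1], table[i].name) == 0)
                        return (table[i].func(argc - 1, argv + 1));
        }
        (void) fprintf(stderr, "unknown command: %s\n", argv[1]);
        return (2);
}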
zfs_command_t *current_command;
static const char *
get_usage(zfs_help_t idx)
{
switch (idx) {
case HELP_CLONE:
return (gettext("\tclone [-p] [-o property=value] ... "
"<snapshot> <filesystem|volume>\n"));
case HELP_CREATE:
return (gettext("\tcreate [-Pnpuv] [-o property=value] ... "
"<filesystem>\n"
"\tcreate [-Pnpsv] [-b blocksize] [-o property=value] ... "
"-V <size> <volume>\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-fnpRrv] <filesystem|volume>\n"
"\tdestroy [-dnpRrv] "
"<filesystem|volume>@<snap>[%<snap>][,...]\n"
"\tdestroy <filesystem|volume>#<bookmark>\n"));
case HELP_GET:
return (gettext("\tget [-rHp] [-d max] "
"[-o \"all\" | field[,...]]\n"
"\t [-t type[,...]] [-s source[,...]]\n"
"\t <\"all\" | property[,...]> "
"[filesystem|volume|snapshot|bookmark] ...\n"));
case HELP_INHERIT:
return (gettext("\tinherit [-rS] <property> "
"<filesystem|volume|snapshot> ...\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade [-v]\n"
"\tupgrade [-r] [-V version] <-a | filesystem ...>\n"));
case HELP_LIST:
return (gettext("\tlist [-Hp] [-r|-d max] [-o property[,...]] "
"[-s property]...\n\t [-S property]... [-t type[,...]] "
"[filesystem|volume|snapshot] ...\n"));
case HELP_MOUNT:
return (gettext("\tmount\n"
"\tmount [-flvO] [-o opts] <-a | filesystem>\n"));
case HELP_PROMOTE:
return (gettext("\tpromote <clone-filesystem>\n"));
case HELP_RECEIVE:
return (gettext("\treceive [-vMnsFhu] "
"[-o <property>=<value>] ... [-x <property>] ...\n"
"\t <filesystem|volume|snapshot>\n"
"\treceive [-vMnsFhu] [-o <property>=<value>] ... "
"[-x <property>] ... \n"
"\t [-d | -e] <filesystem>\n"
"\treceive -A <filesystem|volume>\n"));
case HELP_RENAME:
return (gettext("\trename [-f] <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"
"\trename -p [-f] <filesystem|volume> <filesystem|volume>\n"
"\trename -u [-f] <filesystem> <filesystem>\n"
"\trename -r <snapshot> <snapshot>\n"));
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
return (gettext("\tsend [-DnPpRvLecwhb] [-[i|I] snapshot] "
"<snapshot>\n"
"\tsend [-DnvPLecw] [-i snapshot|bookmark] "
"<filesystem|volume|snapshot>\n"
"\tsend [-DnPpvLec] [-i bookmark|snapshot] "
"--redact <bookmark> <snapshot>\n"
"\tsend [-nvPe] -t <receive_resume_token>\n"
"\tsend [-Pnv] --saved filesystem\n"));
case HELP_SET:
return (gettext("\tset <property=value> ... "
"<filesystem|volume|snapshot> ...\n"));
case HELP_SHARE:
return (gettext("\tshare [-l] <-a [nfs|smb] | filesystem>\n"));
case HELP_SNAPSHOT:
return (gettext("\tsnapshot [-r] [-o property=value] ... "
"<filesystem|volume>@<snap> ...\n"));
case HELP_UNMOUNT:
return (gettext("\tunmount [-fu] "
"<-a | filesystem|mountpoint>\n"));
case HELP_UNSHARE:
return (gettext("\tunshare "
"<-a [nfs|smb] | filesystem|mountpoint>\n"));
case HELP_ALLOW:
return (gettext("\tallow <filesystem|volume>\n"
"\tallow [-ldug] "
"<\"everyone\"|user|group>[,...] <perm|@setname>[,...]\n"
"\t <filesystem|volume>\n"
"\tallow [-ld] -e <perm|@setname>[,...] "
"<filesystem|volume>\n"
"\tallow -c <perm|@setname>[,...] <filesystem|volume>\n"
"\tallow -s @setname <perm|@setname>[,...] "
"<filesystem|volume>\n"));
case HELP_UNALLOW:
return (gettext("\tunallow [-rldug] "
"<\"everyone\"|user|group>[,...]\n"
"\t [<perm|@setname>[,...]] <filesystem|volume>\n"
"\tunallow [-rld] -e [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -c [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -s @setname [<perm|@setname>[,...]] "
"<filesystem|volume>\n"));
case HELP_USERSPACE:
return (gettext("\tuserspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_GROUPSPACE:
return (gettext("\tgroupspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot|path>\n"));
case HELP_PROJECTSPACE:
return (gettext("\tprojectspace [-Hp] [-o field[,...]] "
"[-s field] ... \n"
"\t [-S field] ... <filesystem|snapshot|path>\n"));
case HELP_PROJECT:
return (gettext("\tproject [-d|-r] <directory|file ...>\n"
"\tproject -c [-0] [-d|-r] [-p id] <directory|file ...>\n"
"\tproject -C [-k] [-r] <directory ...>\n"
"\tproject [-p id] [-r] [-s] <directory ...>\n"));
case HELP_HOLD:
return (gettext("\thold [-r] <tag> <snapshot> ...\n"));
case HELP_HOLDS:
return (gettext("\tholds [-rH] <snapshot> ...\n"));
case HELP_RELEASE:
return (gettext("\trelease [-r] <tag> <snapshot> ...\n"));
case HELP_DIFF:
return (gettext("\tdiff [-FHt] <snapshot> "
"[snapshot|filesystem]\n"));
case HELP_BOOKMARK:
return (gettext("\tbookmark <snapshot|bookmark> "
"<newbookmark>\n"));
case HELP_CHANNEL_PROGRAM:
return (gettext("\tprogram [-jn] [-t <instruction limit>] "
"[-m <memory limit (b)>]\n"
"\t <pool> <program file> [lua args...]\n"));
case HELP_LOAD_KEY:
return (gettext("\tload-key [-rn] [-L <keylocation>] "
"<-a | filesystem|volume>\n"));
case HELP_UNLOAD_KEY:
return (gettext("\tunload-key [-r] "
"<-a | filesystem|volume>\n"));
case HELP_CHANGE_KEY:
return (gettext("\tchange-key [-l] [-o keyformat=<value>]\n"
"\t [-o keylocation=<value>] [-o pbkdf2iters=<value>]\n"
"\t <filesystem|volume>\n"
"\tchange-key -i [-l] <filesystem|volume>\n"));
case HELP_VERSION:
return (gettext("\tversion\n"));
case HELP_REDACT:
return (gettext("\tredact <snapshot> <bookmark> "
"<redaction_snapshot> ...\n"));
case HELP_JAIL:
return (gettext("\tjail <jailid|jailname> <filesystem>\n"));
case HELP_UNJAIL:
return (gettext("\tunjail <jailid|jailname> <filesystem>\n"));
case HELP_WAIT:
return (gettext("\twait [-t <activity>] <filesystem>\n"));
default:
__builtin_unreachable();
}
}
void
nomem(void)
{
(void) fprintf(stderr, gettext("internal error: out of memory\n"));
exit(1);
}
/*
* Utility function to guarantee malloc() success.
*/
void *
safe_malloc(size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
nomem();
return (data);
}
static void *
safe_realloc(void *data, size_t size)
{
void *newp;
if ((newp = realloc(data, size)) == NULL) {
free(data);
nomem();
}
return (newp);
}
static char *
safe_strdup(char *str)
{
char *dupstr = strdup(str);
if (dupstr == NULL)
nomem();
return (dupstr);
}
/*
* Callback routine that will print out information for each of
* the properties.
*/
static int
usage_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop));
if (zfs_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, "YES ");
if (zfs_prop_inheritable(prop))
(void) fprintf(fp, " YES ");
else
(void) fprintf(fp, " NO ");
if (zfs_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zfs_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static void
usage(boolean_t requested)
{
int i;
boolean_t show_properties = B_FALSE;
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
(void) fprintf(fp, gettext("usage: zfs command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
(void) fprintf(fp, gettext("\nEach dataset is of the form: "
"pool/[dataset/]*dataset[@name]\n"));
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
(strcmp(current_command->name, "set") == 0 ||
strcmp(current_command->name, "get") == 0 ||
strcmp(current_command->name, "inherit") == 0 ||
strcmp(current_command->name, "list") == 0))
show_properties = B_TRUE;
if (show_properties) {
(void) fprintf(fp,
gettext("\nThe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-14s %s %s %s\n\n",
"PROPERTY", "EDIT", "INHERIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_DATASET);
(void) fprintf(fp, "\t%-15s ", "userused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "userobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "written@<snap>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "written#<bookmark>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, gettext("\nSizes are specified in bytes "
"with standard units such as K, M, G, etc.\n"));
(void) fprintf(fp, gettext("\nUser-defined properties can "
"be specified by using a name containing a colon (:).\n"));
(void) fprintf(fp, gettext("\nThe {user|group|project}"
"[obj]{used|quota}@ properties must be appended with\n"
"a user|group|project specifier of one of these forms:\n"
" POSIX name (eg: \"matt\")\n"
" POSIX id (eg: \"126829\")\n"
" SMB name@domain (eg: \"matt@sun\")\n"
" SMB SID (eg: \"S-1-234-567-89\")\n"));
} else {
(void) fprintf(fp,
gettext("\nFor the property list, run: %s\n"),
"zfs set|get");
(void) fprintf(fp,
gettext("\nFor the delegated permission list, run: %s\n"),
"zfs allow|unallow");
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* Take a property=value argument string and add it to the given nvlist.
* Modifies the argument inplace.
*/
static boolean_t
parseprop(nvlist_t *props, char *propname)
{
char *propval;
if ((propval = strchr(propname, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for property=value argument\n"));
return (B_FALSE);
}
*propval = '\0';
propval++;
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_string(props, propname, propval) != 0)
nomem();
return (B_TRUE);
}
/*
* Take a property name argument and add it to the given nvlist.
* Modifies the argument inplace.
*/
static boolean_t
parsepropname(nvlist_t *props, char *propname)
{
if (strchr(propname, '=') != NULL) {
(void) fprintf(stderr, gettext("invalid character "
"'=' in property argument\n"));
return (B_FALSE);
}
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_boolean(props, propname) != 0)
nomem();
return (B_TRUE);
}
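Both helpers above mutate the argument in place: parseprop() overwrites the '=' with a NUL so the name and value become two strings inside the same buffer before being added to the nvlist. A self-contained sketch of that split, using a plain struct instead of libnvpair:

/*
 * Illustrative sketch (not zfs(8) code): the in-place '=' split that
 * parseprop() performs before adding the pair to its nvlist.  A plain
 * struct stands in for libnvpair to keep the example self-contained.
 */
#include <stdio.h>
#include <string.h>

struct prop_pair {
        char *name;
        char *value;
};

static int
split_prop(char *arg, struct prop_pair *out)
{
        char *eq = strchr(arg, '=');

        if (eq == NULL)
                return (-1);    /* missing '=', as parseprop() rejects */
        *eq = '\0';             /* terminate the name in place */
        out->name = arg;
        out->value = eq + 1;
        return (0);
}

int
main(void)
{
        char arg[] = "compression=zstd";        /* must be writable */
        struct prop_pair p;

        if (split_prop(arg, &p) == 0)
                (void) printf("name=%s value=%s\n", p.name, p.value);
        return (0);
}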
static int
parse_depth(char *opt, int *flags)
{
char *tmp;
int depth;
depth = (int)strtol(opt, &tmp, 0);
if (*tmp) {
(void) fprintf(stderr,
gettext("%s is not an integer\n"), optarg);
usage(B_FALSE);
}
if (depth < 0) {
(void) fprintf(stderr,
gettext("Depth can not be negative.\n"));
usage(B_FALSE);
}
*flags |= (ZFS_ITER_DEPTH_LIMIT|ZFS_ITER_RECURSE);
return (depth);
}
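parse_depth() accepts the value only if strtol() consumes the entire string and the result is non-negative. A small illustrative sketch of that endptr check (the helper name is invented, not zfs(8) code):

/*
 * Illustrative sketch (not zfs(8) code): the strtol()-with-endptr check
 * that parse_depth() relies on to reject non-integer or negative depths.
 */
#include <stdio.h>
#include <stdlib.h>

static int
parse_nonneg_int(const char *opt, int *out)
{
        char *end;
        long v = strtol(opt, &end, 0);

        if (*end != '\0')       /* trailing garbage, e.g. "3x" */
                return (-1);
        if (v < 0)              /* depth can not be negative */
                return (-1);
        *out = (int)v;
        return (0);
}

int
main(void)
{
        int d;

        (void) printf("%d\n", parse_nonneg_int("5", &d));       /* 0, d == 5 */
        (void) printf("%d\n", parse_nonneg_int("3x", &d));      /* -1 */
        (void) printf("%d\n", parse_nonneg_int("-1", &d));      /* -1 */
        return (0);
}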
#define PROGRESS_DELAY 2 /* seconds */
static char *pt_reverse = "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b";
static time_t pt_begin;
static char *pt_header = NULL;
static boolean_t pt_shown;
static void
start_progress_timer(void)
{
pt_begin = time(NULL) + PROGRESS_DELAY;
pt_shown = B_FALSE;
}
static void
set_progress_header(char *header)
{
assert(pt_header == NULL);
pt_header = safe_strdup(header);
if (pt_shown) {
(void) printf("%s: ", header);
(void) fflush(stdout);
}
}
static void
update_progress(char *update)
{
if (!pt_shown && time(NULL) > pt_begin) {
int len = strlen(update);
(void) printf("%s: %s%*.*s", pt_header, update, len, len,
pt_reverse);
(void) fflush(stdout);
pt_shown = B_TRUE;
} else if (pt_shown) {
int len = strlen(update);
(void) printf("%s%*.*s", update, len, len, pt_reverse);
(void) fflush(stdout);
}
}
static void
finish_progress(char *done)
{
if (pt_shown) {
(void) printf("%s\n", done);
(void) fflush(stdout);
}
free(pt_header);
pt_header = NULL;
}
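update_progress() overwrites the previous status word in place: it prints the new word followed by exactly strlen(word) backspaces via the "%*.*s" conversion and the pt_reverse backspace string, so the next update starts at the same column. An illustrative sketch (messages and delay invented, not zfs(8) code):

/*
 * Illustrative sketch (not zfs(8) code): the "%*.*s" + backspace-string
 * trick update_progress() uses to overwrite the previous status word on
 * the same terminal line.
 */
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static const char *reverse =
        "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b";

int
main(void)
{
        const char *steps[] = { "1/3 scanned", "2/3 scanned", "3/3 scanned" };

        (void) printf("progress: ");
        for (int i = 0; i < 3; i++) {
                int len = (int)strlen(steps[i]);

                /* Print the word, then exactly len backspaces. */
                (void) printf("%s%*.*s", steps[i], len, len, reverse);
                (void) fflush(stdout);
                (void) sleep(1);
        }
        (void) printf("done       \n");         /* pad over the last word */
        return (0);
}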
/* This function checks if the passed fd refers to /dev/null or /dev/zero */
#ifdef __linux__
static boolean_t
is_dev_nullzero(int fd)
{
struct stat st;
fstat(fd, &st);
return (major(st.st_rdev) == 1 && (minor(st.st_rdev) == 3 /* null */ ||
minor(st.st_rdev) == 5 /* zero */));
}
#endif
static void
note_dev_error(int err, int fd)
{
#ifdef __linux__
if (err == EINVAL && is_dev_nullzero(fd)) {
(void) fprintf(stderr,
gettext("Error: Writing directly to /dev/{null,zero} files"
" on certain kernels is not currently implemented.\n"
"(As a workaround, "
"try \"zfs send [...] | cat > /dev/null\")\n"));
}
#endif
}
static int
zfs_mount_and_share(libzfs_handle_t *hdl, const char *dataset, zfs_type_t type)
{
zfs_handle_t *zhp = NULL;
int ret = 0;
zhp = zfs_open(hdl, dataset, type);
if (zhp == NULL)
return (1);
/*
* Volumes may be neither mounted nor shared. Potentially, in the
* future, filesystems detected on these volumes could be mounted.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
return (0);
}
/*
* Mount and/or share the new filesystem as appropriate. We provide a
* verbose error message to let the user know that their filesystem was
* in fact created, even if we failed to mount or share it.
*
* If the user doesn't want the dataset automatically mounted, then
* skip the mount/share step
*/
if (zfs_prop_valid_for_type(ZFS_PROP_CANMOUNT, type, B_FALSE) &&
zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_ON) {
if (zfs_mount_delegation_check()) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but it may only be "
"mounted by root\n"));
ret = 1;
} else if (zfs_mount(zhp, NULL, 0) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not mounted\n"));
ret = 1;
} else if (zfs_share(zhp) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not shared\n"));
ret = 1;
}
zfs_commit_all_shares();
}
zfs_close(zhp);
return (ret);
}
/*
* zfs clone [-p] [-o prop=value] ... <snap> <fs | vol>
*
* Given an existing dataset, create a writable copy whose initial contents
* are the same as the source. The newly created dataset maintains a
* dependency on the original; the original cannot be destroyed so long as
* the clone exists.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*/
static int
zfs_do_clone(int argc, char **argv)
{
zfs_handle_t *zhp = NULL;
boolean_t parents = B_FALSE;
nvlist_t *props;
int ret = 0;
int c;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "o:p")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
case 'p':
parents = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
goto usage;
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto usage;
}
/* open the source dataset */
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) {
nvlist_free(props);
return (1);
}
if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
/*
* Now create the ancestors of the target dataset. If the
* target already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
zfs_close(zhp);
nvlist_free(props);
return (0);
}
if (zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
nvlist_free(props);
return (1);
}
}
/* pass to libzfs */
ret = zfs_clone(zhp, argv[1], props);
/* create the mountpoint if necessary */
if (ret == 0) {
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
ret = zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
}
zfs_close(zhp);
nvlist_free(props);
return (!!ret);
usage:
ASSERT3P(zhp, ==, NULL);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* Return a default volblocksize for the pool which always uses more than
* half of the data sectors. This primarily applies to dRAID which always
* writes full stripe widths.
*/
static uint64_t
default_volblocksize(zpool_handle_t *zhp, nvlist_t *props)
{
uint64_t volblocksize, asize = SPA_MINBLOCKSIZE;
nvlist_t *tree, **vdevs;
uint_t nvdevs;
nvlist_t *config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
return (ZVOL_DEFAULT_BLOCKSIZE);
}
for (int i = 0; i < nvdevs; i++) {
nvlist_t *nv = vdevs[i];
uint64_t ashift, ndata, nparity;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
continue;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DRAID_NDATA,
&ndata) == 0) {
/* dRAID minimum allocation width */
asize = MAX(asize, ndata * (1ULL << ashift));
} else if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
/* raidz minimum allocation width */
if (nparity == 1)
asize = MAX(asize, 2 * (1ULL << ashift));
else
asize = MAX(asize, 4 * (1ULL << ashift));
} else {
/* mirror or (non-redundant) leaf vdev */
asize = MAX(asize, 1ULL << ashift);
}
}
/*
* Calculate the target volblocksize such that more than half
* of the asize is used. The following table is for 4k sectors.
*
* n asize blksz used | n asize blksz used
* -------------------------+---------------------------------
* 1 4,096 8,192 100% | 9 36,864 32,768 88%
* 2 8,192 8,192 100% | 10 40,960 32,768 80%
* 3 12,288 8,192 66% | 11 45,056 32,768 72%
* 4 16,384 16,384 100% | 12 49,152 32,768 66%
* 5 20,480 16,384 80% | 13 53,248 32,768 61%
* 6 24,576 16,384 66% | 14 57,344 32,768 57%
* 7 28,672 16,384 57% | 15 61,440 32,768 53%
* 8 32,768 32,768 100% | 16 65,536 65,536 100%
*
* This is primarily a concern for dRAID which always allocates
* a full stripe width. For dRAID the default stripe width is
* n=8 in which case the volblocksize is set to 32k. Ignoring
* compression there are no unused sectors. This same reasoning
* applies to raidz[2,3] so target 4 sectors to minimize waste.
*/
uint64_t tgt_volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
while (tgt_volblocksize * 2 <= asize)
tgt_volblocksize *= 2;
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
if (nvlist_lookup_uint64(props, prop, &volblocksize) == 0) {
/* Issue a warning when a non-optimal size is requested. */
if (volblocksize < ZVOL_DEFAULT_BLOCKSIZE) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is less than the default "
"minimum block size (%llu).\nTo reduce wasted "
"space a volblocksize of %llu is recommended.\n"),
(u_longlong_t)volblocksize,
(u_longlong_t)ZVOL_DEFAULT_BLOCKSIZE,
(u_longlong_t)tgt_volblocksize);
} else if (volblocksize < tgt_volblocksize) {
(void) fprintf(stderr, gettext("Warning: "
"volblocksize (%llu) is much less than the "
"minimum allocation\nunit (%llu), which wastes "
"at least %llu%% of space. To reduce wasted "
"space,\nuse a larger volblocksize (%llu is "
"recommended), fewer dRAID data disks\n"
"per group, or smaller sector size (ashift).\n"),
(u_longlong_t)volblocksize, (u_longlong_t)asize,
(u_longlong_t)((100 * (asize - volblocksize)) /
asize), (u_longlong_t)tgt_volblocksize);
}
} else {
volblocksize = tgt_volblocksize;
fnvlist_add_uint64(props, prop, volblocksize);
}
return (volblocksize);
}
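The loop above doubles the 8K default while twice the candidate still fits within the minimum allocation size; the table in the comment follows from that rule plus rounding each block up to a whole allocation unit. A standalone sketch that reproduces the 4K-sector table (assuming ZVOL_DEFAULT_BLOCKSIZE is 8192, its usual value; not zfs(8) code):

/*
 * Standalone sketch (not zfs(8) code): reproduce the 4K-sector table in
 * the comment above.  Doubling stops once twice the candidate would
 * exceed the minimum allocation size; "used" is the block size over the
 * allocation rounded up to a whole asize.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_DEFAULT_BLOCKSIZE  8192    /* assumed ZVOL_DEFAULT_BLOCKSIZE */

int
main(void)
{
        int ashift = 12;                        /* 4K sectors */

        for (int n = 1; n <= 16; n++) {
                uint64_t asize = (uint64_t)n << ashift;
                uint64_t tgt = DEMO_DEFAULT_BLOCKSIZE;

                while (tgt * 2 <= asize)
                        tgt *= 2;

                /* Space allocated is the block rounded up to a full asize. */
                uint64_t alloc = ((tgt + asize - 1) / asize) * asize;
                (void) printf("%2d  asize=%6llu  blksz=%6llu  used=%3llu%%\n",
                    n, (unsigned long long)asize, (unsigned long long)tgt,
                    (unsigned long long)(100 * tgt / alloc));
        }
        return (0);
}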
/*
* zfs create [-Pnpv] [-o prop=value] ... fs
* zfs create [-Pnpsv] [-b blocksize] [-o prop=value] ... -V vol size
*
* Create a new dataset. This command can be used to create filesystems
* and volumes. Snapshot creation is handled by 'zfs snapshot'.
* For volumes, the user must specify a size to be used.
*
* The '-s' flag applies only to volumes, and indicates that we should not try
* to set the reservation for this volume. By default we set a reservation
* equal to the size for any volume. For pools with SPA_VERSION >=
* SPA_VERSION_REFRESERVATION, we set a refreservation instead.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*
* The '-n' flag is no-op (dry run) mode. This will perform a user-space sanity
* check of arguments and properties, but does not check for permissions,
* available space, etc.
*
* The '-u' flag prevents the newly created file system from being mounted.
*
* The '-v' flag is for verbose output.
*
* The '-P' flag is used for parseable output. It implies '-v'.
*/
static int
zfs_do_create(int argc, char **argv)
{
zfs_type_t type = ZFS_TYPE_FILESYSTEM;
zpool_handle_t *zpool_handle = NULL;
nvlist_t *real_props = NULL;
uint64_t volsize = 0;
int c;
boolean_t noreserve = B_FALSE;
boolean_t bflag = B_FALSE;
boolean_t parents = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t nomount = B_FALSE;
boolean_t verbose = B_FALSE;
boolean_t parseable = B_FALSE;
int ret = 1;
nvlist_t *props;
uint64_t intval;
char *strval;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":PV:b:nso:puv")) != -1) {
switch (c) {
case 'V':
type = ZFS_TYPE_VOLUME;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), intval) != 0)
nomem();
volsize = intval;
break;
case 'P':
verbose = B_TRUE;
parseable = B_TRUE;
break;
case 'p':
parents = B_TRUE;
break;
case 'b':
bflag = B_TRUE;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"block size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
intval) != 0)
nomem();
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg))
goto error;
break;
case 's':
noreserve = B_TRUE;
break;
case 'u':
nomount = B_TRUE;
break;
case 'v':
verbose = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing size "
"argument\n"));
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) {
(void) fprintf(stderr, gettext("'-s' and '-b' can only be "
"used when creating a volume\n"));
goto badusage;
}
if (nomount && type != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("'-u' can only be "
"used when creating a filesystem\n"));
goto badusage;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing %s argument\n"),
zfs_type_to_name(type));
goto badusage;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto badusage;
}
if (dryrun || type == ZFS_TYPE_VOLUME) {
char msg[ZFS_MAX_DATASET_NAME_LEN * 2];
char *p;
if ((p = strchr(argv[0], '/')) != NULL)
*p = '\0';
zpool_handle = zpool_open(g_zfs, argv[0]);
if (p != NULL)
*p = '/';
if (zpool_handle == NULL)
goto error;
(void) snprintf(msg, sizeof (msg),
dryrun ? gettext("cannot verify '%s'") :
gettext("cannot create '%s'"), argv[0]);
if (props && (real_props = zfs_valid_proplist(g_zfs, type,
props, 0, NULL, zpool_handle, B_TRUE, msg)) == NULL) {
zpool_close(zpool_handle);
goto error;
}
}
if (type == ZFS_TYPE_VOLUME) {
const char *prop = zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE);
uint64_t volblocksize = default_volblocksize(zpool_handle,
real_props);
if (volblocksize != ZVOL_DEFAULT_BLOCKSIZE &&
nvlist_lookup_string(props, prop, &strval) != 0) {
if (asprintf(&strval, "%llu",
(u_longlong_t)volblocksize) == -1)
nomem();
nvlist_add_string(props, prop, strval);
free(strval);
}
/*
* If volsize is not a multiple of volblocksize, round it
* up to the nearest multiple of the volblocksize.
*/
if (volsize % volblocksize) {
volsize = P2ROUNDUP_TYPED(volsize, volblocksize,
uint64_t);
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (type == ZFS_TYPE_VOLUME && !noreserve) {
uint64_t spa_version;
zfs_prop_t resv_prop;
spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
if (spa_version >= SPA_VERSION_REFRESERVATION)
resv_prop = ZFS_PROP_REFRESERVATION;
else
resv_prop = ZFS_PROP_RESERVATION;
volsize = zvol_volsize_to_reservation(zpool_handle, volsize,
real_props);
if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop),
&strval) != 0) {
if (nvlist_add_uint64(props,
zfs_prop_to_name(resv_prop), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (zpool_handle != NULL) {
zpool_close(zpool_handle);
nvlist_free(real_props);
}
if (parents && zfs_name_valid(argv[0], type)) {
/*
* Now create the ancestors of target dataset. If the target
* already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[0], type)) {
ret = 0;
goto error;
}
if (verbose) {
(void) printf(parseable ? "create_ancestors\t%s\n" :
dryrun ? "would create ancestors of %s\n" :
"create ancestors of %s\n", argv[0]);
}
if (!dryrun) {
if (zfs_create_ancestors(g_zfs, argv[0]) != 0) {
goto error;
}
}
}
if (verbose) {
nvpair_t *nvp = NULL;
(void) printf(parseable ? "create\t%s\n" :
dryrun ? "would create %s\n" : "create %s\n", argv[0]);
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
uint64_t uval;
char *sval;
switch (nvpair_type(nvp)) {
case DATA_TYPE_UINT64:
VERIFY0(nvpair_value_uint64(nvp, &uval));
(void) printf(parseable ?
"property\t%s\t%llu\n" : "\t%s=%llu\n",
nvpair_name(nvp), (u_longlong_t)uval);
break;
case DATA_TYPE_STRING:
VERIFY0(nvpair_value_string(nvp, &sval));
(void) printf(parseable ?
"property\t%s\t%s\n" : "\t%s=%s\n",
nvpair_name(nvp), sval);
break;
default:
(void) fprintf(stderr, "property '%s' "
"has illegal type %d\n",
nvpair_name(nvp), nvpair_type(nvp));
abort();
}
}
}
if (dryrun) {
ret = 0;
goto error;
}
/* pass to libzfs */
if (zfs_create(g_zfs, argv[0], type, props) != 0)
goto error;
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (nomount) {
ret = 0;
goto error;
}
ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
return (ret);
badusage:
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zfs destroy [-rRf] <fs, vol>
* zfs destroy [-rRd] <snap>
*
* -r Recursively destroy all children
* -R Recursively destroy all dependents, including clones
* -f Force unmounting of any dependents
* -d If we can't destroy now, mark for deferred destruction
*
* Destroys the given dataset. By default, it will unmount any filesystems,
* and refuse to destroy a dataset that has any dependents. A dependent can
* either be a child, or a clone of a child.
*/
typedef struct destroy_cbdata {
boolean_t cb_first;
boolean_t cb_force;
boolean_t cb_recurse;
boolean_t cb_error;
boolean_t cb_doclones;
zfs_handle_t *cb_target;
boolean_t cb_defer_destroy;
boolean_t cb_verbose;
boolean_t cb_parsable;
boolean_t cb_dryrun;
nvlist_t *cb_nvl;
nvlist_t *cb_batchedsnaps;
/* first snap in contiguous run */
char *cb_firstsnap;
/* previous snap in contiguous run */
char *cb_prevsnap;
int64_t cb_snapused;
char *cb_snapspec;
char *cb_bookmark;
uint64_t cb_snap_count;
} destroy_cbdata_t;
/*
* Check for any dependents based on the '-r' or '-R' flags.
*/
static int
destroy_check_dependent(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cbp = data;
const char *tname = zfs_get_name(cbp->cb_target);
const char *name = zfs_get_name(zhp);
if (strncmp(tname, name, strlen(tname)) == 0 &&
(name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) {
/*
* This is a direct descendant, not a clone somewhere else in
* the hierarchy.
*/
if (cbp->cb_recurse)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has children\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-r' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
} else {
/*
* This is a clone. We only want to report this if the '-r'
* wasn't specified, or the target is a snapshot.
*/
if (!cbp->cb_recurse &&
zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has dependent clones\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-R' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
cbp->cb_dryrun = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
}
out:
zfs_close(zhp);
return (0);
}
static int
destroy_batched(destroy_cbdata_t *cb)
{
int error = zfs_destroy_snaps_nvl(g_zfs,
cb->cb_batchedsnaps, B_FALSE);
fnvlist_free(cb->cb_batchedsnaps);
cb->cb_batchedsnaps = fnvlist_alloc();
return (error);
}
static int
destroy_callback(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cb = data;
const char *name = zfs_get_name(zhp);
int error;
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
/*
* Ignore pools (which we've already flagged as an error before getting
* here).
*/
if (strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
zfs_close(zhp);
return (0);
}
if (cb->cb_dryrun) {
zfs_close(zhp);
return (0);
}
/*
* We batch up all contiguous snapshots (even of different
* filesystems) and destroy them with one ioctl. We can't
* simply do all snap deletions and then all fs deletions,
* because we must delete a clone before its origin.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) {
cb->cb_snap_count++;
fnvlist_add_boolean(cb->cb_batchedsnaps, name);
if (cb->cb_snap_count % 10 == 0 && cb->cb_defer_destroy)
error = destroy_batched(cb);
} else {
error = destroy_batched(cb);
if (error != 0 ||
zfs_unmount(zhp, NULL, cb->cb_force ? MS_FORCE : 0) != 0 ||
zfs_destroy(zhp, cb->cb_defer_destroy) != 0) {
zfs_close(zhp);
/*
* When performing a recursive destroy we ignore errors
* so that the recursive destroy can continue past
* problem datasets.
*/
if (cb->cb_recurse) {
cb->cb_error = B_TRUE;
return (0);
}
return (-1);
}
}
zfs_close(zhp);
return (0);
}
static int
destroy_print_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
const char *name = zfs_get_name(zhp);
int err = 0;
if (nvlist_exists(cb->cb_nvl, name)) {
if (cb->cb_firstsnap == NULL)
cb->cb_firstsnap = strdup(name);
if (cb->cb_prevsnap != NULL)
free(cb->cb_prevsnap);
/* this snap continues the current range */
cb->cb_prevsnap = strdup(name);
if (cb->cb_firstsnap == NULL || cb->cb_prevsnap == NULL)
nomem();
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
} else if (cb->cb_firstsnap != NULL) {
/* end of this range */
uint64_t used = 0;
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
zfs_close(zhp);
return (err);
}
static int
destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb)
{
int err;
assert(cb->cb_firstsnap == NULL);
assert(cb->cb_prevsnap == NULL);
err = zfs_iter_snapshots_sorted(fs_zhp, destroy_print_cb, cb, 0, 0);
if (cb->cb_firstsnap != NULL) {
uint64_t used = 0;
if (err == 0) {
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
}
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
return (err);
}
static int
snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
/* Check for clones. */
if (!cb->cb_doclones && !cb->cb_defer_destroy) {
cb->cb_target = zhp;
cb->cb_first = B_TRUE;
err = zfs_iter_dependents(zhp, B_TRUE,
destroy_check_dependent, cb);
}
if (err == 0) {
if (nvlist_add_boolean(cb->cb_nvl, zfs_get_name(zhp)))
nomem();
}
zfs_close(zhp);
return (err);
}
static int
gather_snapshots(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
err = zfs_iter_snapspec(zhp, cb->cb_snapspec, snapshot_to_nvl_cb, cb);
if (err == ENOENT)
err = 0;
if (err != 0)
goto out;
if (cb->cb_verbose) {
err = destroy_print_snapshots(zhp, cb);
if (err != 0)
goto out;
}
if (cb->cb_recurse)
err = zfs_iter_filesystems(zhp, gather_snapshots, cb);
out:
zfs_close(zhp);
return (err);
}
static int
destroy_clones(destroy_cbdata_t *cb)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(cb->cb_nvl, NULL);
pair != NULL;
pair = nvlist_next_nvpair(cb->cb_nvl, pair)) {
zfs_handle_t *zhp = zfs_open(g_zfs, nvpair_name(pair),
ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
boolean_t defer = cb->cb_defer_destroy;
int err;
/*
* We can't defer destroy non-snapshots, so set it to
* false while destroying the clones.
*/
cb->cb_defer_destroy = B_FALSE;
err = zfs_iter_dependents(zhp, B_FALSE,
destroy_callback, cb);
cb->cb_defer_destroy = defer;
zfs_close(zhp);
if (err != 0)
return (err);
}
}
return (0);
}
static int
zfs_do_destroy(int argc, char **argv)
{
destroy_cbdata_t cb = { 0 };
int rv = 0;
int err = 0;
int c;
zfs_handle_t *zhp = NULL;
char *at, *pound;
zfs_type_t type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, "vpndfrR")) != -1) {
switch (c) {
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'p':
cb.cb_verbose = B_TRUE;
cb.cb_parsable = B_TRUE;
break;
case 'n':
cb.cb_dryrun = B_TRUE;
break;
case 'd':
cb.cb_defer_destroy = B_TRUE;
type = ZFS_TYPE_SNAPSHOT;
break;
case 'f':
cb.cb_force = B_TRUE;
break;
case 'r':
cb.cb_recurse = B_TRUE;
break;
case 'R':
cb.cb_recurse = B_TRUE;
cb.cb_doclones = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
at = strchr(argv[0], '@');
pound = strchr(argv[0], '#');
if (at != NULL) {
/* Build the list of snaps to destroy in cb_nvl. */
cb.cb_nvl = fnvlist_alloc();
*at = '\0';
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(cb.cb_nvl);
return (1);
}
cb.cb_snapspec = at + 1;
if (gather_snapshots(zfs_handle_dup(zhp), &cb) != 0 ||
cb.cb_error) {
rv = 1;
goto out;
}
if (nvlist_empty(cb.cb_nvl)) {
(void) fprintf(stderr, gettext("could not find any "
"snapshots to destroy; check snapshot names.\n"));
rv = 1;
goto out;
}
if (cb.cb_verbose) {
char buf[16];
zfs_nicebytes(cb.cb_snapused, buf, sizeof (buf));
if (cb.cb_parsable) {
(void) printf("reclaim\t%llu\n",
(u_longlong_t)cb.cb_snapused);
} else if (cb.cb_dryrun) {
(void) printf(gettext("would reclaim %s\n"),
buf);
} else {
(void) printf(gettext("will reclaim %s\n"),
buf);
}
}
if (!cb.cb_dryrun) {
if (cb.cb_doclones) {
cb.cb_batchedsnaps = fnvlist_alloc();
err = destroy_clones(&cb);
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, B_FALSE);
}
if (err != 0) {
rv = 1;
goto out;
}
}
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs, cb.cb_nvl,
cb.cb_defer_destroy);
}
}
if (err != 0)
rv = 1;
} else if (pound != NULL) {
int err;
nvlist_t *nvl;
if (cb.cb_dryrun) {
(void) fprintf(stderr,
"dryrun is not supported with bookmark\n");
return (-1);
}
if (cb.cb_defer_destroy) {
(void) fprintf(stderr,
"defer destroy is not supported with bookmark\n");
return (-1);
}
if (cb.cb_recurse) {
(void) fprintf(stderr,
"recursive is not supported with bookmark\n");
return (-1);
}
/*
* Unfortunately, zfs_bookmark() doesn't honor the
* casesensitivity setting. However, we can't simply
* remove this check, because lzc_destroy_bookmarks()
* ignores non-existent bookmarks, so this is necessary
* to get a proper error message.
*/
if (!zfs_bookmark_exists(argv[0])) {
(void) fprintf(stderr, gettext("bookmark '%s' "
"does not exist.\n"), argv[0]);
return (1);
}
nvl = fnvlist_alloc();
fnvlist_add_boolean(nvl, argv[0]);
err = lzc_destroy_bookmarks(nvl, NULL);
if (err != 0) {
(void) zfs_standard_error(g_zfs, err,
"cannot destroy bookmark");
}
nvlist_free(nvl);
return (err);
} else {
/* Open the given dataset */
if ((zhp = zfs_open(g_zfs, argv[0], type)) == NULL)
return (1);
cb.cb_target = zhp;
/*
* Perform an explicit check for pools before going any further.
*/
if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"operation does not apply to pools\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zfs destroy -r "
"%s' to destroy all datasets in the pool\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zpool destroy %s' "
"to destroy the pool itself\n"), zfs_get_name(zhp));
rv = 1;
goto out;
}
/*
* Check for any dependents and/or clones.
*/
cb.cb_first = B_TRUE;
if (!cb.cb_doclones &&
zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent,
&cb) != 0) {
rv = 1;
goto out;
}
if (cb.cb_error) {
rv = 1;
goto out;
}
cb.cb_batchedsnaps = fnvlist_alloc();
if (zfs_iter_dependents(zhp, B_FALSE, destroy_callback,
&cb) != 0) {
rv = 1;
goto out;
}
/*
* Do the real thing. The callback will close the
* handle regardless of whether it succeeds or not.
*/
err = destroy_callback(zhp, &cb);
zhp = NULL;
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, cb.cb_defer_destroy);
}
if (err != 0 || cb.cb_error == B_TRUE)
rv = 1;
}
out:
fnvlist_free(cb.cb_batchedsnaps);
fnvlist_free(cb.cb_nvl);
if (zhp != NULL)
zfs_close(zhp);
return (rv);
}
static boolean_t
is_recvd_column(zprop_get_cbdata_t *cbp)
{
int i;
zfs_get_column_t col;
for (i = 0; i < ZFS_GET_NCOLS &&
(col = cbp->cb_columns[i]) != GET_COL_NONE; i++)
if (col == GET_COL_RECVD)
return (B_TRUE);
return (B_FALSE);
}
/*
* zfs get [-rHp] [-o all | field[,field]...] [-s source[,source]...]
* < all | property[,property]... > < fs | snap | vol > ...
*
* -r recurse over any child datasets
* -H scripted mode. Headers are stripped, and fields are separated
* by tabs instead of spaces.
* -o Set of fields to display. One of "name,property,value,
* received,source". Default is "name,property,value,source".
* "all" is an alias for all five.
* -s Set of sources to allow. One of
* "local,default,inherited,received,temporary,none". Default is
* all six.
* -p Display values in parsable (literal) format.
*
* Prints properties for the given datasets. The user can control which
* columns to display as well as which property types to allow.
*/
/*
* Invoked to display the properties for a single dataset.
*/
static int
get_callback(zfs_handle_t *zhp, void *data)
{
char buf[ZFS_MAXPROPLEN];
char rbuf[ZFS_MAXPROPLEN];
zprop_source_t sourcetype;
char source[ZFS_MAX_DATASET_NAME_LEN];
zprop_get_cbdata_t *cbp = data;
nvlist_t *user_props = zfs_get_user_props(zhp);
zprop_list_t *pl = cbp->cb_proplist;
nvlist_t *propval;
char *strval;
char *sourceval;
boolean_t received = is_recvd_column(cbp);
for (; pl != NULL; pl = pl->pl_next) {
char *recvdval = NULL;
/*
* Skip the special fake placeholder. This will also skip over
* the name property when 'all' is specified.
*/
if (pl->pl_prop == ZFS_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, pl->pl_prop, buf,
sizeof (buf), &sourcetype, source,
sizeof (source),
cbp->cb_literal) != 0) {
if (pl->pl_all)
continue;
if (!zfs_prop_valid_for_type(pl->pl_prop,
ZFS_TYPE_DATASET, B_FALSE)) {
(void) fprintf(stderr,
gettext("No such property '%s'\n"),
zfs_prop_to_name(pl->pl_prop));
continue;
}
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
if (received && (zfs_prop_get_recvd(zhp,
zfs_prop_to_name(pl->pl_prop), rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
zfs_prop_to_name(pl->pl_prop),
buf, sourcetype, source, recvdval);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else if (zfs_prop_written(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else {
if (nvlist_lookup_nvlist(user_props,
pl->pl_user_prop, &propval) != 0) {
if (pl->pl_all)
continue;
sourcetype = ZPROP_SRC_NONE;
strval = "-";
} else {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
verify(nvlist_lookup_string(propval,
ZPROP_SOURCE, &sourceval) == 0);
if (strcmp(sourceval,
zfs_get_name(zhp)) == 0) {
sourcetype = ZPROP_SRC_LOCAL;
} else if (strcmp(sourceval,
ZPROP_SOURCE_VAL_RECVD) == 0) {
sourcetype = ZPROP_SRC_RECEIVED;
} else {
sourcetype = ZPROP_SRC_INHERITED;
(void) strlcpy(source,
sourceval, sizeof (source));
}
}
if (received && (zfs_prop_get_recvd(zhp,
pl->pl_user_prop, rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, strval, sourcetype,
source, recvdval);
}
}
return (0);
}
static int
zfs_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
int i, c, flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
int types = ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK;
char *value, *fields;
int ret = 0;
int limit = 0;
zprop_list_t fake_name = { 0 };
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'o':
/*
* Process the set of columns to display. We zero out
* the structure to give us a blank slate.
*/
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
{ "name", "property", "value", "received",
"source", "all", NULL };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
switch (getsubopt(&optarg, col_subopts,
&value)) {
case 0:
cb.cb_columns[i++] = GET_COL_NAME;
break;
case 1:
cb.cb_columns[i++] = GET_COL_PROPERTY;
break;
case 2:
cb.cb_columns[i++] = GET_COL_VALUE;
break;
case 3:
cb.cb_columns[i++] = GET_COL_RECVD;
flags |= ZFS_ITER_RECVD_PROPS;
break;
case 4:
cb.cb_columns[i++] = GET_COL_SOURCE;
break;
case 5:
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_RECVD;
cb.cb_columns[4] = GET_COL_SOURCE;
flags |= ZFS_ITER_RECVD_PROPS;
i = ZFS_GET_NCOLS;
break;
default:
(void) fprintf(stderr,
gettext("invalid column name "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case 's':
cb.cb_sources = 0;
while (*optarg != '\0') {
static char *source_subopts[] = {
"local", "default", "inherited",
"received", "temporary", "none",
NULL };
switch (getsubopt(&optarg, source_subopts,
&value)) {
case 0:
cb.cb_sources |= ZPROP_SRC_LOCAL;
break;
case 1:
cb.cb_sources |= ZPROP_SRC_DEFAULT;
break;
case 2:
cb.cb_sources |= ZPROP_SRC_INHERITED;
break;
case 3:
cb.cb_sources |= ZPROP_SRC_RECEIVED;
break;
case 4:
cb.cb_sources |= ZPROP_SRC_TEMPORARY;
break;
case 5:
cb.cb_sources |= ZPROP_SRC_NONE;
break;
default:
(void) fprintf(stderr,
gettext("invalid source "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case 't':
types = 0;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
while (*optarg != '\0') {
static char *type_subopts[] = { "filesystem",
"volume", "snapshot", "snap", "bookmark",
"all", NULL };
switch (getsubopt(&optarg, type_subopts,
&value)) {
case 0:
types |= ZFS_TYPE_FILESYSTEM;
break;
case 1:
types |= ZFS_TYPE_VOLUME;
break;
case 2:
case 3:
types |= ZFS_TYPE_SNAPSHOT;
break;
case 4:
types |= ZFS_TYPE_BOOKMARK;
break;
case 5:
types = ZFS_TYPE_DATASET |
ZFS_TYPE_BOOKMARK;
break;
default:
(void) fprintf(stderr,
gettext("invalid type '%s'\n"),
value);
usage(B_FALSE);
}
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
fields = argv[0];
/*
* Handle users who want to get all snapshots or bookmarks
* of a dataset (ex. 'zfs get -t snapshot refer <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 1 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
argc--;
argv++;
/*
* As part of zfs_expand_proplist(), we keep track of the maximum column
* width for each property. For the 'NAME' (and 'SOURCE') columns, we
* need to know the maximum name length. However, the user likely did
* not specify 'name' as one of the properties to fetch, so we need to
* make sure we always include at least this property for
* print_get_headers() to work properly.
*/
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZFS_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
cb.cb_first = B_TRUE;
/* run for each object */
ret = zfs_for_each(argc, argv, flags, types, NULL,
&cb.cb_proplist, limit, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
/*
* inherit [-rS] <property> <fs|vol> ...
*
* -r Recurse over all children
* -S Revert to received value, if any
*
* For each dataset specified on the command line, inherit the given property
* from its parent. Inheriting a property at the pool level will cause it to
* use the default value. The '-r' flag will recurse over all children, and is
* useful for setting a property on a hierarchy-wide basis, regardless of any
* local modifications for each dataset.
*/
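/*
* Example invocations (hypothetical dataset names):
*
*   zfs inherit -r compression tank/home
*   zfs inherit -S sharenfs tank/home
*/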
typedef struct inherit_cbdata {
const char *cb_propname;
boolean_t cb_received;
} inherit_cbdata_t;
static int
inherit_recurse_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
zfs_prop_t prop = zfs_name_to_prop(cb->cb_propname);
/*
* If we're doing it recursively, then ignore properties that
* are not valid for this type of dataset.
*/
if (prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, zfs_get_type(zhp), B_FALSE))
return (0);
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
inherit_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
zfs_do_inherit(int argc, char **argv)
{
int c;
zfs_prop_t prop;
inherit_cbdata_t cb = { 0 };
char *propname;
int ret = 0;
int flags = 0;
boolean_t received = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "rS")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'S':
received = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
propname = argv[0];
argc--;
argv++;
if ((prop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
if (zfs_prop_readonly(prop)) {
(void) fprintf(stderr, gettext(
"%s property is read-only\n"),
propname);
return (1);
}
if (!zfs_prop_inheritable(prop) && !received) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be inherited\n"), propname);
if (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION) {
(void) fprintf(stderr, gettext("use 'zfs set "
"%s=none' to clear\n"), propname);
(void) fprintf(stderr, gettext("use 'zfs "
"inherit -S %s' to revert to received "
"value\n"), propname);
}
return (1);
}
if (received && (prop == ZFS_PROP_VOLSIZE ||
prop == ZFS_PROP_VERSION)) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be reverted to a received value\n"), propname);
return (1);
}
} else if (!zfs_prop_user(propname)) {
(void) fprintf(stderr, gettext("invalid property '%s'\n"),
propname);
usage(B_FALSE);
}
cb.cb_propname = propname;
cb.cb_received = received;
if (flags & ZFS_ITER_RECURSE) {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_recurse_cb, &cb);
} else {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_cb, &cb);
}
return (ret);
}
typedef struct upgrade_cbdata {
uint64_t cb_numupgraded;
uint64_t cb_numsamegraded;
uint64_t cb_numfailed;
uint64_t cb_version;
boolean_t cb_newer;
boolean_t cb_foundone;
char cb_lastfs[ZFS_MAX_DATASET_NAME_LEN];
} upgrade_cbdata_t;
static int
same_pool(zfs_handle_t *zhp, const char *name)
{
int len1 = strcspn(name, "/@");
const char *zhname = zfs_get_name(zhp);
int len2 = strcspn(zhname, "/@");
if (len1 != len2)
return (B_FALSE);
return (strncmp(name, zhname, len1) == 0);
}
static int
upgrade_list_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
/* list if it's old/new */
if ((!cb->cb_newer && version < ZPL_VERSION) ||
(cb->cb_newer && version > ZPL_VERSION)) {
char *str;
if (cb->cb_newer) {
str = gettext("The following filesystems are "
"formatted using a newer software version and\n"
"cannot be accessed on the current system.\n\n");
} else {
str = gettext("The following filesystems are "
"out of date, and can be upgraded. After being\n"
"upgraded, these filesystems (and any 'zfs send' "
"streams generated from\n"
"subsequent snapshots) will no longer be "
"accessible by older software versions.\n\n");
}
if (!cb->cb_foundone) {
(void) puts(str);
(void) printf(gettext("VER FILESYSTEM\n"));
(void) printf(gettext("--- ------------\n"));
cb->cb_foundone = B_TRUE;
}
(void) printf("%2u %s\n", version, zfs_get_name(zhp));
}
return (0);
}
static int
upgrade_set_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int needed_spa_version;
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
needed_spa_version = zfs_spa_version_map(cb->cb_version);
if (needed_spa_version < 0)
return (-1);
if (spa_version < needed_spa_version) {
/* can't upgrade */
(void) printf(gettext("%s: can not be "
"upgraded; the pool version needs to first "
"be upgraded\nto version %d\n\n"),
zfs_get_name(zhp), needed_spa_version);
cb->cb_numfailed++;
return (0);
}
/* upgrade */
if (version < cb->cb_version) {
char verstr[16];
(void) snprintf(verstr, sizeof (verstr),
"%llu", (u_longlong_t)cb->cb_version);
if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) {
/*
* If they did "zfs upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (zfs_prop_set(zhp, "version", verstr) == 0)
cb->cb_numupgraded++;
else
cb->cb_numfailed++;
(void) strcpy(cb->cb_lastfs, zfs_get_name(zhp));
} else if (version > cb->cb_version) {
/* can't downgrade */
(void) printf(gettext("%s: can not be downgraded; "
"it is already at version %u\n"),
zfs_get_name(zhp), version);
cb->cb_numfailed++;
} else {
cb->cb_numsamegraded++;
}
return (0);
}
/*
* zfs upgrade
* zfs upgrade -v
* zfs upgrade [-r] [-V <version>] <-a | filesystem>
*/
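/*
* Example invocations ("tank/home" is a hypothetical dataset):
*
*   zfs upgrade -v              (list supported filesystem versions)
*   zfs upgrade -a              (upgrade all filesystems to the current version)
*   zfs upgrade -r -V 5 tank/home
*/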
static int
zfs_do_upgrade(int argc, char **argv)
{
boolean_t all = B_FALSE;
boolean_t showversions = B_FALSE;
int ret = 0;
upgrade_cbdata_t cb = { 0 };
int c;
int flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "rvV:a")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
if (zfs_prop_string_to_index(ZFS_PROP_VERSION,
optarg, &cb.cb_version) != 0) {
(void) fprintf(stderr,
gettext("invalid version %s\n"), optarg);
usage(B_FALSE);
}
break;
case 'a':
all = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) | cb.cb_version))
usage(B_FALSE);
if (showversions && (flags & ZFS_ITER_RECURSE || all ||
cb.cb_version || argc))
usage(B_FALSE);
if ((all || argc) && (showversions))
usage(B_FALSE);
if (all && argc)
usage(B_FALSE);
if (showversions) {
/* Show info on available versions. */
(void) printf(gettext("The following filesystem versions are "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS filesystem version\n"));
(void) printf(gettext(" 2 Enhanced directory entries\n"));
(void) printf(gettext(" 3 Case insensitive and filesystem "
"user identifier (FUID)\n"));
(void) printf(gettext(" 4 userquota, groupquota "
"properties\n"));
(void) printf(gettext(" 5 System attributes\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf("see the ZFS Administration Guide.\n\n");
ret = 0;
} else if (argc || all) {
/* Upgrade filesystems */
if (cb.cb_version == 0)
cb.cb_version = ZPL_VERSION;
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_set_callback, &cb);
(void) printf(gettext("%llu filesystems upgraded\n"),
(u_longlong_t)cb.cb_numupgraded);
if (cb.cb_numsamegraded) {
(void) printf(gettext("%llu filesystems already at "
"this version\n"),
(u_longlong_t)cb.cb_numsamegraded);
}
if (cb.cb_numfailed != 0)
ret = 1;
} else {
/* List old-version filesystems */
boolean_t found;
(void) printf(gettext("This system is currently running "
"ZFS filesystem version %llu.\n\n"), ZPL_VERSION);
flags |= ZFS_ITER_RECURSE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
found = cb.cb_foundone;
cb.cb_foundone = B_FALSE;
cb.cb_newer = B_TRUE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
if (!cb.cb_foundone && !found) {
(void) printf(gettext("All filesystems are "
"formatted with the current version.\n"));
}
}
return (ret);
}
/*
* zfs userspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs groupspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]]
* filesystem | snapshot | path
* zfs projectspace [-Hp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] filesystem | snapshot | path
*
* -H Scripted mode; elide headers and separate columns by tabs.
* -i Translate SID to POSIX ID.
* -n Print numeric ID instead of user/group name.
* -o Control which fields to display.
* -p Use exact (parsable) numeric output.
* -s Specify sort columns, ascending order.
* -S Specify sort columns, descending order.
* -t Control which object types to display.
*
* Displays space consumed by, and quotas on, each user in the specified
* filesystem or snapshot.
*/
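/*
* Example invocations (hypothetical dataset names):
*
*   zfs userspace tank/home
*   zfs userspace -H -o name,used -s used tank/home
*   zfs groupspace -i tank/home
*   zfs projectspace tank/home
*/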
/* us_field_types, us_field_hdr and us_field_names should be kept in sync */
enum us_field_types {
USFIELD_TYPE,
USFIELD_NAME,
USFIELD_USED,
USFIELD_QUOTA,
USFIELD_OBJUSED,
USFIELD_OBJQUOTA
};
static char *us_field_hdr[] = { "TYPE", "NAME", "USED", "QUOTA",
"OBJUSED", "OBJQUOTA" };
static char *us_field_names[] = { "type", "name", "used", "quota",
"objused", "objquota" };
#define USFIELD_LAST (sizeof (us_field_names) / sizeof (char *))
#define USTYPE_PSX_GRP (1 << 0)
#define USTYPE_PSX_USR (1 << 1)
#define USTYPE_SMB_GRP (1 << 2)
#define USTYPE_SMB_USR (1 << 3)
#define USTYPE_PROJ (1 << 4)
#define USTYPE_ALL \
(USTYPE_PSX_GRP | USTYPE_PSX_USR | USTYPE_SMB_GRP | USTYPE_SMB_USR | \
USTYPE_PROJ)
static int us_type_bits[] = {
USTYPE_PSX_GRP,
USTYPE_PSX_USR,
USTYPE_SMB_GRP,
USTYPE_SMB_USR,
USTYPE_ALL
};
static char *us_type_names[] = { "posixgroup", "posixuser", "smbgroup",
"smbuser", "all" };
typedef struct us_node {
nvlist_t *usn_nvl;
uu_avl_node_t usn_avlnode;
uu_list_node_t usn_listnode;
} us_node_t;
typedef struct us_cbdata {
nvlist_t **cb_nvlp;
uu_avl_pool_t *cb_avl_pool;
uu_avl_t *cb_avl;
boolean_t cb_numname;
boolean_t cb_nicenum;
boolean_t cb_sid2posix;
zfs_userquota_prop_t cb_prop;
zfs_sort_column_t *cb_sortcol;
size_t cb_width[USFIELD_LAST];
} us_cbdata_t;
static boolean_t us_populated = B_FALSE;
typedef struct {
zfs_sort_column_t *si_sortcol;
boolean_t si_numname;
} us_sort_info_t;
static int
us_field_index(char *field)
{
int i;
for (i = 0; i < USFIELD_LAST; i++) {
if (strcmp(field, us_field_names[i]) == 0)
return (i);
}
return (-1);
}
static int
us_compare(const void *larg, const void *rarg, void *unused)
{
const us_node_t *l = larg;
const us_node_t *r = rarg;
us_sort_info_t *si = (us_sort_info_t *)unused;
zfs_sort_column_t *sortcol = si->si_sortcol;
boolean_t numname = si->si_numname;
nvlist_t *lnvl = l->usn_nvl;
nvlist_t *rnvl = r->usn_nvl;
int rc = 0;
boolean_t lvb, rvb;
for (; sortcol != NULL; sortcol = sortcol->sc_next) {
char *lvstr = "";
char *rvstr = "";
uint32_t lv32 = 0;
uint32_t rv32 = 0;
uint64_t lv64 = 0;
uint64_t rv64 = 0;
zfs_prop_t prop = sortcol->sc_prop;
const char *propname = NULL;
boolean_t reverse = sortcol->sc_reverse;
switch (prop) {
case ZFS_PROP_TYPE:
propname = "type";
(void) nvlist_lookup_uint32(lnvl, propname, &lv32);
(void) nvlist_lookup_uint32(rnvl, propname, &rv32);
if (rv32 != lv32)
rc = (rv32 < lv32) ? 1 : -1;
break;
case ZFS_PROP_NAME:
propname = "name";
if (numname) {
compare_nums:
(void) nvlist_lookup_uint64(lnvl, propname,
&lv64);
(void) nvlist_lookup_uint64(rnvl, propname,
&rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
} else {
if ((nvlist_lookup_string(lnvl, propname,
&lvstr) == ENOENT) ||
(nvlist_lookup_string(rnvl, propname,
&rvstr) == ENOENT)) {
goto compare_nums;
}
rc = strcmp(lvstr, rvstr);
}
break;
case ZFS_PROP_USED:
case ZFS_PROP_QUOTA:
if (!us_populated)
break;
if (prop == ZFS_PROP_USED)
propname = "used";
else
propname = "quota";
(void) nvlist_lookup_uint64(lnvl, propname, &lv64);
(void) nvlist_lookup_uint64(rnvl, propname, &rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
break;
default:
break;
}
if (rc != 0) {
if (rc < 0)
return (reverse ? 1 : -1);
else
return (reverse ? -1 : 1);
}
}
/*
* If entries still seem to be the same, check if they are of the same
* type (smbentity is added only if we are doing SID to POSIX ID
* translation where we can have duplicate type/name combinations).
*/
if (nvlist_lookup_boolean_value(lnvl, "smbentity", &lvb) == 0 &&
nvlist_lookup_boolean_value(rnvl, "smbentity", &rvb) == 0 &&
lvb != rvb)
return (lvb < rvb ? -1 : 1);
return (0);
}
static boolean_t
zfs_prop_is_user(unsigned p)
{
return (p == ZFS_PROP_USERUSED || p == ZFS_PROP_USERQUOTA ||
p == ZFS_PROP_USEROBJUSED || p == ZFS_PROP_USEROBJQUOTA);
}
static boolean_t
zfs_prop_is_group(unsigned p)
{
return (p == ZFS_PROP_GROUPUSED || p == ZFS_PROP_GROUPQUOTA ||
p == ZFS_PROP_GROUPOBJUSED || p == ZFS_PROP_GROUPOBJQUOTA);
}
static boolean_t
zfs_prop_is_project(unsigned p)
{
return (p == ZFS_PROP_PROJECTUSED || p == ZFS_PROP_PROJECTQUOTA ||
p == ZFS_PROP_PROJECTOBJUSED || p == ZFS_PROP_PROJECTOBJQUOTA);
}
static inline const char *
us_type2str(unsigned field_type)
{
switch (field_type) {
case USTYPE_PSX_USR:
return ("POSIX User");
case USTYPE_PSX_GRP:
return ("POSIX Group");
case USTYPE_SMB_USR:
return ("SMB User");
case USTYPE_SMB_GRP:
return ("SMB Group");
case USTYPE_PROJ:
return ("Project");
default:
return ("Undefined");
}
}
static int
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
us_cbdata_t *cb = (us_cbdata_t *)arg;
zfs_userquota_prop_t prop = cb->cb_prop;
char *name = NULL;
char *propname;
char sizebuf[32];
us_node_t *node;
uu_avl_pool_t *avl_pool = cb->cb_avl_pool;
uu_avl_t *avl = cb->cb_avl;
uu_avl_index_t idx;
nvlist_t *props;
us_node_t *n;
zfs_sort_column_t *sortcol = cb->cb_sortcol;
unsigned type = 0;
const char *typestr;
size_t namelen;
size_t typelen;
size_t sizelen;
int typeidx, nameidx, sizeidx;
us_sort_info_t sortinfo = { sortcol, cb->cb_numname };
boolean_t smbentity = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
node = safe_malloc(sizeof (us_node_t));
uu_avl_node_init(node, &node->usn_avlnode, avl_pool);
node->usn_nvl = props;
if (domain != NULL && domain[0] != '\0') {
#ifdef HAVE_IDMAP
/* SMB */
char sid[MAXNAMELEN + 32];
uid_t id;
uint64_t classes;
int err;
directory_error_t e;
smbentity = B_TRUE;
(void) snprintf(sid, sizeof (sid), "%s-%u", domain, rid);
if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) {
type = USTYPE_SMB_GRP;
err = sid_to_id(sid, B_FALSE, &id);
} else {
type = USTYPE_SMB_USR;
err = sid_to_id(sid, B_TRUE, &id);
}
if (err == 0) {
rid = id;
if (!cb->cb_sid2posix) {
e = directory_name_from_sid(NULL, sid, &name,
&classes);
if (e != NULL)
directory_error_free(e);
if (name == NULL)
name = sid;
}
}
#else
nvlist_free(props);
free(node);
return (-1);
#endif /* HAVE_IDMAP */
}
if (cb->cb_sid2posix || domain == NULL || domain[0] == '\0') {
/* POSIX or -i */
if (zfs_prop_is_group(prop)) {
type = USTYPE_PSX_GRP;
if (!cb->cb_numname) {
struct group *g;
if ((g = getgrgid(rid)) != NULL)
name = g->gr_name;
}
} else if (zfs_prop_is_user(prop)) {
type = USTYPE_PSX_USR;
if (!cb->cb_numname) {
struct passwd *p;
if ((p = getpwuid(rid)) != NULL)
name = p->pw_name;
}
} else {
type = USTYPE_PROJ;
}
}
/*
* Make sure that the type/name combination is unique when doing
* SID to POSIX ID translation (hence changing the type from SMB to
* POSIX).
*/
if (cb->cb_sid2posix &&
nvlist_add_boolean_value(props, "smbentity", smbentity) != 0)
nomem();
/* Calculate/update width of TYPE field */
typestr = us_type2str(type);
typelen = strlen(gettext(typestr));
typeidx = us_field_index("type");
if (typelen > cb->cb_width[typeidx])
cb->cb_width[typeidx] = typelen;
if (nvlist_add_uint32(props, "type", type) != 0)
nomem();
/* Calculate/update width of NAME field */
if ((cb->cb_numname && cb->cb_sid2posix) || name == NULL) {
if (nvlist_add_uint64(props, "name", rid) != 0)
nomem();
namelen = snprintf(NULL, 0, "%u", rid);
} else {
if (nvlist_add_string(props, "name", name) != 0)
nomem();
namelen = strlen(name);
}
nameidx = us_field_index("name");
if (nameidx >= 0 && namelen > cb->cb_width[nameidx])
cb->cb_width[nameidx] = namelen;
/*
* Check if this type/name combination is in the list and update it;
* otherwise add new node to the list.
*/
if ((n = uu_avl_find(avl, node, &sortinfo, &idx)) == NULL) {
uu_avl_insert(avl, node, idx);
} else {
nvlist_free(props);
free(node);
node = n;
props = node->usn_nvl;
}
/* Calculate/update width of USED/QUOTA fields */
if (cb->cb_nicenum) {
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTUSED ||
prop == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(space, sizebuf, sizeof (sizebuf));
} else {
zfs_nicenum(space, sizebuf, sizeof (sizebuf));
}
} else {
(void) snprintf(sizebuf, sizeof (sizebuf), "%llu",
(u_longlong_t)space);
}
sizelen = strlen(sizebuf);
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_PROJECTUSED) {
propname = "used";
if (!nvlist_exists(props, "quota"))
(void) nvlist_add_uint64(props, "quota", 0);
} else if (prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTQUOTA) {
propname = "quota";
if (!nvlist_exists(props, "used"))
(void) nvlist_add_uint64(props, "used", 0);
} else if (prop == ZFS_PROP_USEROBJUSED ||
prop == ZFS_PROP_GROUPOBJUSED || prop == ZFS_PROP_PROJECTOBJUSED) {
propname = "objused";
if (!nvlist_exists(props, "objquota"))
(void) nvlist_add_uint64(props, "objquota", 0);
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
prop == ZFS_PROP_GROUPOBJQUOTA ||
prop == ZFS_PROP_PROJECTOBJQUOTA) {
propname = "objquota";
if (!nvlist_exists(props, "objused"))
(void) nvlist_add_uint64(props, "objused", 0);
} else {
return (-1);
}
sizeidx = us_field_index(propname);
if (sizeidx >= 0 && sizelen > cb->cb_width[sizeidx])
cb->cb_width[sizeidx] = sizelen;
if (nvlist_add_uint64(props, propname, space) != 0)
nomem();
return (0);
}
static void
print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, us_node_t *node)
{
nvlist_t *nvl = node->usn_nvl;
char valstr[MAXNAMELEN];
boolean_t first = B_TRUE;
int cfield = 0;
int field;
uint32_t ustype;
/* Check type */
(void) nvlist_lookup_uint32(nvl, "type", &ustype);
if (!(ustype & types))
return;
while ((field = fields[cfield]) != USFIELD_LAST) {
nvpair_t *nvp = NULL;
data_type_t type;
uint32_t val32;
uint64_t val64;
char *strval = "-";
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
if (strcmp(nvpair_name(nvp),
us_field_names[field]) == 0)
break;
}
type = nvp == NULL ? DATA_TYPE_UNKNOWN : nvpair_type(nvp);
switch (type) {
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &val32);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &val64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &strval);
break;
case DATA_TYPE_UNKNOWN:
break;
default:
(void) fprintf(stderr, "invalid data type\n");
}
switch (field) {
case USFIELD_TYPE:
if (type == DATA_TYPE_UINT32)
strval = (char *)us_type2str(val32);
break;
case USFIELD_NAME:
if (type == DATA_TYPE_UINT64) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
}
break;
case USFIELD_USED:
case USFIELD_QUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_QUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicebytes(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
case USFIELD_OBJUSED:
case USFIELD_OBJQUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_OBJQUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicenum(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
}
if (!first) {
if (scripted)
(void) printf("\t");
else
(void) printf(" ");
}
if (scripted)
(void) printf("%s", strval);
else if (field == USFIELD_TYPE || field == USFIELD_NAME)
(void) printf("%-*s", (int)width[field], strval);
else
(void) printf("%*s", (int)width[field], strval);
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
static void
print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, boolean_t rmnode, uu_avl_t *avl)
{
us_node_t *node;
const char *col;
int cfield = 0;
int field;
if (!scripted) {
boolean_t first = B_TRUE;
while ((field = fields[cfield]) != USFIELD_LAST) {
col = gettext(us_field_hdr[field]);
if (field == USFIELD_TYPE || field == USFIELD_NAME) {
(void) printf(first ? "%-*s" : " %-*s",
(int)width[field], col);
} else {
(void) printf(first ? "%*s" : " %*s",
(int)width[field], col);
}
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
for (node = uu_avl_first(avl); node; node = uu_avl_next(avl, node)) {
print_us_node(scripted, parsable, fields, types, width, node);
if (rmnode)
nvlist_free(node->usn_nvl);
}
}
static int
zfs_do_userspace(int argc, char **argv)
{
zfs_handle_t *zhp;
zfs_userquota_prop_t p;
uu_avl_pool_t *avl_pool;
uu_avl_t *avl_tree;
uu_avl_walk_t *walk;
char *delim;
char deffields[] = "type,name,used,quota,objused,objquota";
char *ofield = NULL;
char *tfield = NULL;
int cfield = 0;
int fields[256];
int i;
boolean_t scripted = B_FALSE;
boolean_t prtnum = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t sid2posix = B_FALSE;
int ret = 0;
int c;
zfs_sort_column_t *sortcol = NULL;
int types = USTYPE_PSX_USR | USTYPE_SMB_USR;
us_cbdata_t cb;
us_node_t *node;
us_node_t *rmnode;
uu_list_pool_t *listpool;
uu_list_t *list;
uu_avl_index_t idx = 0;
uu_list_index_t idx2 = 0;
if (argc < 2)
usage(B_FALSE);
if (strcmp(argv[0], "groupspace") == 0) {
/* Toggle default group types */
types = USTYPE_PSX_GRP | USTYPE_SMB_GRP;
} else if (strcmp(argv[0], "projectspace") == 0) {
types = USTYPE_PROJ;
prtnum = B_TRUE;
}
while ((c = getopt(argc, argv, "nHpo:s:S:t:i")) != -1) {
switch (c) {
case 'n':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'n'\n"));
usage(B_FALSE);
}
prtnum = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'o':
ofield = optarg;
break;
case 's':
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
c == 's' ? B_FALSE : B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid field '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 't'\n"));
usage(B_FALSE);
}
tfield = optarg;
break;
case 'i':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'i'\n"));
usage(B_FALSE);
}
sid2posix = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* Use default output fields if not specified using -o */
if (ofield == NULL)
ofield = deffields;
do {
if ((delim = strchr(ofield, ',')) != NULL)
*delim = '\0';
if ((fields[cfield++] = us_field_index(ofield)) == -1) {
(void) fprintf(stderr, gettext("invalid type '%s' "
"for -o option\n"), ofield);
return (-1);
}
if (delim != NULL)
ofield = delim + 1;
} while (delim != NULL);
fields[cfield] = USFIELD_LAST;
/* Override output types (-t option) */
if (tfield != NULL) {
types = 0;
do {
boolean_t found = B_FALSE;
if ((delim = strchr(tfield, ',')) != NULL)
*delim = '\0';
for (i = 0; i < sizeof (us_type_bits) / sizeof (int);
i++) {
if (strcmp(tfield, us_type_names[i]) == 0) {
found = B_TRUE;
types |= us_type_bits[i];
break;
}
}
if (!found) {
(void) fprintf(stderr, gettext("invalid type "
"'%s' for -t option\n"), tfield);
return (-1);
}
if (delim != NULL)
tfield = delim + 1;
} while (delim != NULL);
}
if ((zhp = zfs_path_to_zhandle(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
if (zfs_get_underlying_type(zhp) != ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("operation is only applicable "
"to filesystems and their snapshots\n"));
zfs_close(zhp);
return (1);
}
if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t),
offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL)
nomem();
if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
nomem();
/* Always add default sorting columns */
(void) zfs_add_sort_column(&sortcol, "type", B_FALSE);
(void) zfs_add_sort_column(&sortcol, "name", B_FALSE);
cb.cb_sortcol = sortcol;
cb.cb_numname = prtnum;
cb.cb_nicenum = !parsable;
cb.cb_avl_pool = avl_pool;
cb.cb_avl = avl_tree;
cb.cb_sid2posix = sid2posix;
for (i = 0; i < USFIELD_LAST; i++)
cb.cb_width[i] = strlen(gettext(us_field_hdr[i]));
for (p = 0; p < ZFS_NUM_USERQUOTA_PROPS; p++) {
if ((zfs_prop_is_user(p) &&
!(types & (USTYPE_PSX_USR | USTYPE_SMB_USR))) ||
(zfs_prop_is_group(p) &&
!(types & (USTYPE_PSX_GRP | USTYPE_SMB_GRP))) ||
(zfs_prop_is_project(p) && types != USTYPE_PROJ))
continue;
cb.cb_prop = p;
if ((ret = zfs_userspace(zhp, p, userspace_cb, &cb)) != 0) {
zfs_close(zhp);
return (ret);
}
}
zfs_close(zhp);
/* Sort the list */
if ((node = uu_avl_first(avl_tree)) == NULL)
return (0);
us_populated = B_TRUE;
listpool = uu_list_pool_create("tmplist", sizeof (us_node_t),
offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT);
list = uu_list_create(listpool, NULL, UU_DEFAULT);
uu_list_node_init(node, &node->usn_listnode, listpool);
while (node != NULL) {
rmnode = node;
node = uu_avl_next(avl_tree, node);
uu_avl_remove(avl_tree, rmnode);
if (uu_list_find(list, rmnode, NULL, &idx2) == NULL)
uu_list_insert(list, rmnode, idx2);
}
for (node = uu_list_first(list); node != NULL;
node = uu_list_next(list, node)) {
us_sort_info_t sortinfo = { sortcol, cb.cb_numname };
if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL)
uu_avl_insert(avl_tree, node, idx);
}
uu_list_destroy(list);
uu_list_pool_destroy(listpool);
/* Print and free node nvlist memory */
print_us(scripted, parsable, fields, types, cb.cb_width, B_TRUE,
cb.cb_avl);
zfs_free_sort_columns(sortcol);
/* Clean up the AVL tree */
if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(cb.cb_avl, node);
free(node);
}
uu_avl_walk_end(walk);
uu_avl_destroy(avl_tree);
uu_avl_pool_destroy(avl_pool);
return (ret);
}
/*
* list [-Hp][-r|-d max] [-o property[,...]] [-s property] ... [-S property]
* [-t type[,...]] [filesystem|volume|snapshot] ...
*
* -H Scripted mode; elide headers and separate columns by tabs
* -p Display values in parsable (literal) format.
* -r Recurse over all children
* -d Limit recursion by depth.
* -o Control which fields to display.
* -s Specify sort columns, ascending order.
* -S Specify sort columns, descending order.
* -t Control which object types to display.
*
* When given no arguments, list all filesystems in the system.
* Otherwise, list the specified datasets, optionally recursing down them if
* '-r' is specified.
*/
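/*
* Example invocations (hypothetical dataset names):
*
*   zfs list
*   zfs list -r -t snapshot tank/home
*   zfs list -Hp -o name,used,available -s used tank
*/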
typedef struct list_cbdata {
boolean_t cb_first;
boolean_t cb_literal;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZFS_MAXPROPLEN];
const char *header;
int i;
boolean_t first = B_TRUE;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
header = zfs_prop_column_name(pl->pl_prop);
right_justify = zfs_prop_align_right(pl->pl_prop);
} else {
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, header);
else
(void) printf("%-*s", (int)pl->pl_width, header);
}
(void) printf("\n");
}
/*
* Given a dataset and a list of fields, print out all the properties according
* to the described layout.
*/
static void
print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZFS_MAXPROPLEN];
nvlist_t *userprops = zfs_get_user_props(zhp);
nvlist_t *propval;
char *propstr;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
if (cb->cb_scripted)
(void) printf("\t");
else
(void) printf(" ");
} else {
first = B_FALSE;
}
if (pl->pl_prop == ZFS_PROP_NAME) {
(void) strlcpy(property, zfs_get_name(zhp),
sizeof (property));
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (pl->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, pl->pl_prop, property,
sizeof (property), NULL, NULL, 0,
cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else if (zfs_prop_written(pl->pl_user_prop)) {
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else {
if (nvlist_lookup_nvlist(userprops,
pl->pl_user_prop, &propval) != 0)
propstr = "-";
else
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &propstr) == 0);
right_justify = B_FALSE;
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) printf("%s", propstr);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, propstr);
else
(void) printf("%-*s", (int)pl->pl_width, propstr);
}
(void) printf("\n");
}
/*
* Generic callback function to list a dataset or snapshot.
*/
static int
list_callback(zfs_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
if (cbp->cb_first) {
if (!cbp->cb_scripted)
print_header(cbp);
cbp->cb_first = B_FALSE;
}
print_dataset(zhp, cbp);
return (0);
}
static int
zfs_do_list(int argc, char **argv)
{
int c;
static char default_fields[] =
"name,used,available,referenced,mountpoint";
int types = ZFS_TYPE_DATASET;
boolean_t types_specified = B_FALSE;
char *fields = NULL;
list_cbdata_t cb = { 0 };
char *value;
int limit = 0;
int ret = 0;
zfs_sort_column_t *sortcol = NULL;
int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "HS:d:o:prs:t:")) != -1) {
switch (c) {
case 'o':
fields = optarg;
break;
case 'p':
cb.cb_literal = B_TRUE;
flags |= ZFS_ITER_LITERAL_PROPS;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 's':
if (zfs_add_sort_column(&sortcol, optarg,
B_FALSE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
types = 0;
types_specified = B_TRUE;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
while (*optarg != '\0') {
static char *type_subopts[] = { "filesystem",
"volume", "snapshot", "snap", "bookmark",
"all", NULL };
switch (getsubopt(&optarg, type_subopts,
&value)) {
case 0:
types |= ZFS_TYPE_FILESYSTEM;
break;
case 1:
types |= ZFS_TYPE_VOLUME;
break;
case 2:
case 3:
types |= ZFS_TYPE_SNAPSHOT;
break;
case 4:
types |= ZFS_TYPE_BOOKMARK;
break;
case 5:
types = ZFS_TYPE_DATASET |
ZFS_TYPE_BOOKMARK;
break;
default:
(void) fprintf(stderr,
gettext("invalid type '%s'\n"),
value);
usage(B_FALSE);
}
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (fields == NULL)
fields = default_fields;
/*
* If we are only going to list snapshot names and sort by name,
* then we can use the faster version.
*/
if (strcmp(fields, "name") == 0 && zfs_sort_only_by_name(sortcol))
flags |= ZFS_ITER_SIMPLE;
/*
* If "-o space" and no types were specified, don't display snapshots.
*/
if (strcmp(fields, "space") == 0 && types_specified == B_FALSE)
types &= ~ZFS_TYPE_SNAPSHOT;
/*
* Handle users who want to list all snapshots or bookmarks
* of the current dataset (ex. 'zfs list -t snapshot <dataset>').
*/
if ((types == ZFS_TYPE_SNAPSHOT || types == ZFS_TYPE_BOOKMARK) &&
argc > 0 && (flags & ZFS_ITER_RECURSE) == 0 && limit == 0) {
flags |= (ZFS_ITER_DEPTH_LIMIT | ZFS_ITER_RECURSE);
limit = 1;
}
/*
* If the user specifies '-o all', zprop_get_list() doesn't
* normally include the name of the dataset. For 'zfs list', we always
* want this property to be first.
*/
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
cb.cb_first = B_TRUE;
ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist,
limit, list_callback, &cb);
zprop_free_list(cb.cb_proplist);
zfs_free_sort_columns(sortcol);
if (ret == 0 && cb.cb_first && !cb.cb_scripted)
(void) fprintf(stderr, gettext("no datasets available\n"));
return (ret);
}
/*
* zfs rename [-fu] <fs | snap | vol> <fs | snap | vol>
* zfs rename [-f] -p <fs | vol> <fs | vol>
* zfs rename [-u] -r <snap> <snap>
*
* Renames the given dataset to another of the same type.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
* The '-u' flag prevents file systems from being remounted during rename.
*/
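/*
* Example invocations (hypothetical dataset/snapshot names):
*
*   zfs rename tank/home/olduser tank/home/newuser
*   zfs rename -p tank/data tank/archive/2021/data
*   zfs rename -r tank/home@monday tank/home@tuesday
*/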
/* ARGSUSED */
static int
zfs_do_rename(int argc, char **argv)
{
zfs_handle_t *zhp;
renameflags_t flags = { 0 };
int c;
int ret = 0;
int types;
boolean_t parents = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "pruf")) != -1) {
switch (c) {
case 'p':
parents = B_TRUE;
break;
case 'r':
flags.recursive = B_TRUE;
break;
case 'u':
flags.nounmount = B_TRUE;
break;
case 'f':
flags.forceunmount = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (flags.recursive && parents) {
(void) fprintf(stderr, gettext("-p and -r options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.nounmount && parents) {
(void) fprintf(stderr, gettext("-u and -p options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (flags.recursive && strchr(argv[0], '@') == 0) {
(void) fprintf(stderr, gettext("source dataset for recursive "
"rename must be a snapshot\n"));
usage(B_FALSE);
}
if (flags.nounmount)
types = ZFS_TYPE_FILESYSTEM;
else if (parents)
types = ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
else
types = ZFS_TYPE_DATASET;
if ((zhp = zfs_open(g_zfs, argv[0], types)) == NULL)
return (1);
/* If we were asked and the name looks good, try to create ancestors. */
if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) &&
zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
return (1);
}
ret = (zfs_rename(zhp, argv[1], flags) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs promote <fs>
*
* Promotes the given clone fs to be the parent
*/
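/*
* Example invocation ("tank/home/newfs" is a hypothetical clone):
*
*   zfs promote tank/home/newfs
*/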
/* ARGSUSED */
static int
zfs_do_promote(int argc, char **argv)
{
zfs_handle_t *zhp;
int ret = 0;
/* check options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing clone filesystem"
" argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
ret = (zfs_promote(zhp) != 0);
zfs_close(zhp);
return (ret);
}
static int
zfs_do_redact(int argc, char **argv)
{
char *snap = NULL;
char *bookname = NULL;
char **rsnaps = NULL;
int numrsnaps = 0;
argv++;
argc--;
if (argc < 3) {
(void) fprintf(stderr, gettext("too few arguments\n"));
usage(B_FALSE);
}
snap = argv[0];
bookname = argv[1];
rsnaps = argv + 2;
numrsnaps = argc - 2;
nvlist_t *rsnapnv = fnvlist_alloc();
for (int i = 0; i < numrsnaps; i++) {
fnvlist_add_boolean(rsnapnv, rsnaps[i]);
}
int err = lzc_redact(snap, bookname, rsnapnv);
fnvlist_free(rsnapnv);
switch (err) {
case 0:
break;
case ENOENT:
(void) fprintf(stderr,
gettext("provided snapshot %s does not exist\n"), snap);
break;
case EEXIST:
(void) fprintf(stderr, gettext("specified redaction bookmark "
"(%s) provided already exists\n"), bookname);
break;
case ENAMETOOLONG:
(void) fprintf(stderr, gettext("provided bookmark name cannot "
"be used, final name would be too long\n"));
break;
case E2BIG:
(void) fprintf(stderr, gettext("too many redaction snapshots "
"specified\n"));
break;
case EINVAL:
if (strchr(bookname, '#') != NULL)
(void) fprintf(stderr, gettext(
"redaction bookmark name must not contain '#'\n"));
else
(void) fprintf(stderr, gettext(
"redaction snapshot must be descendent of "
"snapshot being redacted\n"));
break;
case EALREADY:
(void) fprintf(stderr, gettext("attempted to redact redacted "
"dataset or with respect to redacted dataset\n"));
break;
case ENOTSUP:
(void) fprintf(stderr, gettext("redaction bookmarks feature "
"not enabled\n"));
break;
case EXDEV:
(void) fprintf(stderr, gettext("potentially invalid redaction "
"snapshot; full dataset names required\n"));
break;
default:
(void) fprintf(stderr, gettext("internal error: %s\n"),
strerror(errno));
}
return (err);
}
/*
* zfs rollback [-rRf] <snapshot>
*
* -r Delete any intervening snapshots before doing rollback
* -R Delete any snapshots and their clones
* -f ignored for backwards compatibility
*
* Given a filesystem, rollback to a specific snapshot, discarding any changes
* since then and making it the active dataset. If more recent snapshots exist,
* the command will complain unless the '-r' flag is given.
*/
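/*
* Example invocations (hypothetical dataset/snapshot names):
*
*   zfs rollback tank/home@yesterday
*   zfs rollback -r tank/home@lastweek    (also destroys newer snapshots)
*/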
typedef struct rollback_cbdata {
uint64_t cb_create;
uint8_t cb_younger_ds_printed;
boolean_t cb_first;
int cb_doclones;
char *cb_target;
int cb_error;
boolean_t cb_recurse;
} rollback_cbdata_t;
static int
rollback_check_dependent(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
if (cbp->cb_first && cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot rollback to "
"'%s': clones of previous snapshots exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-R' to "
"force deletion of the following clones and "
"dependents:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
zfs_close(zhp);
return (0);
}
/*
* Report snapshots/bookmarks more recent than the one specified. Used when
* '-r' is not specified: the more recent snapshots and bookmarks are listed
* so the user can remove them. When '-r' is given, any clones and other
* dependents of those snapshots are reported via rollback_check_dependent()
* instead.
*/
static int
rollback_check(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
/*
* Max number of younger snapshots and/or bookmarks to display before
* we stop the iteration.
*/
const uint8_t max_younger = 32;
if (cbp->cb_doclones) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
if (cbp->cb_first && !cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot "
"rollback to '%s': more recent snapshots "
"or bookmarks exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-r' to "
"force deletion of the following "
"snapshots and bookmarks:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
if (cbp->cb_recurse) {
if (zfs_iter_dependents(zhp, B_TRUE,
rollback_check_dependent, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
} else {
(void) fprintf(stderr, "%s\n",
zfs_get_name(zhp));
cbp->cb_younger_ds_printed++;
}
}
zfs_close(zhp);
if (cbp->cb_younger_ds_printed == max_younger) {
/*
* This non-recursive rollback is going to fail due to the
* presence of snapshots and/or bookmarks that are younger than
* the rollback target.
* We printed some of the offending objects, now we stop
* zfs_iter_snapshot/bookmark iteration so we can fail fast and
* avoid iterating over the rest of the younger objects
*/
(void) fprintf(stderr, gettext("Output limited to %d "
"snapshots/bookmarks\n"), max_younger);
return (-1);
}
return (0);
}
static int
zfs_do_rollback(int argc, char **argv)
{
int ret = 0;
int c;
boolean_t force = B_FALSE;
rollback_cbdata_t cb = { 0 };
zfs_handle_t *zhp, *snap;
char parentname[ZFS_MAX_DATASET_NAME_LEN];
char *delim;
uint64_t min_txg = 0;
/* check options */
while ((c = getopt(argc, argv, "rRf")) != -1) {
switch (c) {
case 'r':
cb.cb_recurse = 1;
break;
case 'R':
cb.cb_recurse = 1;
cb.cb_doclones = 1;
break;
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* open the snapshot */
if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
/* open the parent dataset */
(void) strlcpy(parentname, argv[0], sizeof (parentname));
verify((delim = strrchr(parentname, '@')) != NULL);
*delim = '\0';
if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) {
zfs_close(snap);
return (1);
}
/*
* Check for more recent snapshots and/or clones based on the presence
* of '-r' and '-R'.
*/
cb.cb_target = argv[0];
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
cb.cb_first = B_TRUE;
cb.cb_error = 0;
if (cb.cb_create > 0)
min_txg = cb.cb_create;
if ((ret = zfs_iter_snapshots(zhp, B_FALSE, rollback_check, &cb,
min_txg, 0)) != 0)
goto out;
if ((ret = zfs_iter_bookmarks(zhp, rollback_check, &cb)) != 0)
goto out;
if ((ret = cb.cb_error) != 0)
goto out;
/*
* Rollback parent to the given snapshot.
*/
ret = zfs_rollback(zhp, snap, force);
out:
zfs_close(snap);
zfs_close(zhp);
if (ret == 0)
return (0);
else
return (1);
}
/*
* zfs set property=value ... { fs | snap | vol } ...
*
* Sets the given properties for all datasets specified on the command line.
*/
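/*
* Example invocations (hypothetical dataset names):
*
*   zfs set compression=lz4 tank/home
*   zfs set atime=off quota=10G tank/home tank/data
*/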
static int
set_callback(zfs_handle_t *zhp, void *data)
{
nvlist_t *props = data;
if (zfs_prop_set_list(zhp, props) != 0) {
switch (libzfs_errno(g_zfs)) {
case EZFS_MOUNTFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to remount filesystem\n"));
break;
case EZFS_SHARENFSFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to reshare filesystem\n"));
break;
}
return (1);
}
return (0);
}
static int
zfs_do_set(int argc, char **argv)
{
nvlist_t *props = NULL;
int ds_start = -1; /* argv idx of first dataset arg */
int ret = 0;
int i;
/* check for options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing arguments\n"));
usage(B_FALSE);
}
if (argc < 3) {
if (strchr(argv[1], '=') == NULL) {
(void) fprintf(stderr, gettext("missing property=value "
"argument(s)\n"));
} else {
(void) fprintf(stderr, gettext("missing dataset "
"name(s)\n"));
}
usage(B_FALSE);
}
/* validate argument order: prop=val args followed by dataset args */
for (i = 1; i < argc; i++) {
if (strchr(argv[i], '=') != NULL) {
if (ds_start > 0) {
/* out-of-order prop=val argument */
(void) fprintf(stderr, gettext("invalid "
"argument order\n"));
usage(B_FALSE);
}
} else if (ds_start < 0) {
ds_start = i;
}
}
if (ds_start < 0) {
(void) fprintf(stderr, gettext("missing dataset name(s)\n"));
usage(B_FALSE);
}
/* Populate a list of property settings */
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 1; i < ds_start; i++) {
if (!parseprop(props, argv[i])) {
ret = -1;
goto error;
}
}
ret = zfs_for_each(argc - ds_start, argv + ds_start, 0,
ZFS_TYPE_DATASET, NULL, NULL, 0, set_callback, props);
error:
nvlist_free(props);
return (ret);
}
typedef struct snap_cbdata {
nvlist_t *sd_nvl;
boolean_t sd_recursive;
const char *sd_snapname;
} snap_cbdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snap_cbdata_t *sd = arg;
char *name;
int rv = 0;
int error;
if (sd->sd_recursive &&
zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) != 0) {
zfs_close(zhp);
return (0);
}
error = asprintf(&name, "%s@%s", zfs_get_name(zhp), sd->sd_snapname);
if (error == -1)
nomem();
fnvlist_add_boolean(sd->sd_nvl, name);
free(name);
if (sd->sd_recursive)
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
zfs_close(zhp);
return (rv);
}
/*
* zfs snapshot [-r] [-o prop=value] ... <fs@snap>
*
* Creates a snapshot with the given name. While functionally equivalent to
* 'zfs create', it is a separate command to differentiate intent.
*/
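/*
* Example invocations (hypothetical dataset names; the '-o' property is a
* user-defined property chosen for illustration):
*
*   zfs snapshot tank/home@friday
*   zfs snapshot -r -o com.example:reason=nightly tank@nightly
*/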
static int
zfs_do_snapshot(int argc, char **argv)
{
int ret = 0;
int c;
nvlist_t *props;
snap_cbdata_t sd = { 0 };
boolean_t multiple_snaps = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "ro:")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(sd.sd_nvl);
nvlist_free(props);
return (1);
}
break;
case 'r':
sd.sd_recursive = B_TRUE;
multiple_snaps = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
goto usage;
}
if (argc > 1)
multiple_snaps = B_TRUE;
for (; argc > 0; argc--, argv++) {
char *atp;
zfs_handle_t *zhp;
atp = strchr(argv[0], '@');
if (atp == NULL)
goto usage;
*atp = '\0';
sd.sd_snapname = atp + 1;
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
goto usage;
if (zfs_snapshot_cb(zhp, &sd) != 0)
goto usage;
}
ret = zfs_snapshot_nvl(g_zfs, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
nvlist_free(props);
if (ret != 0 && multiple_snaps)
(void) fprintf(stderr, gettext("no snapshots were created\n"));
return (ret != 0);
usage:
nvlist_free(sd.sd_nvl);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* Send a backup stream to stdout.
*/
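/*
* Example invocations (hypothetical dataset, snapshot, host, and file names):
*
*   zfs send tank/home@friday > /backup/home-friday.zstream
*   zfs send -i @thursday tank/home@friday | ssh host zfs receive pool/home
*   zfs send -Rw tank@friday | zfs receive -d backuppool
*/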
static int
zfs_do_send(int argc, char **argv)
{
char *fromname = NULL;
char *toname = NULL;
char *resume_token = NULL;
char *cp;
zfs_handle_t *zhp;
sendflags_t flags = { 0 };
int c, err;
nvlist_t *dbgnv = NULL;
char *redactbook = NULL;
struct option long_options[] = {
{"replicate", no_argument, NULL, 'R'},
{"skip-missing", no_argument, NULL, 's'},
{"redact", required_argument, NULL, 'd'},
{"props", no_argument, NULL, 'p'},
{"parsable", no_argument, NULL, 'P'},
{"dedup", no_argument, NULL, 'D'},
{"verbose", no_argument, NULL, 'v'},
{"dryrun", no_argument, NULL, 'n'},
{"large-block", no_argument, NULL, 'L'},
{"embed", no_argument, NULL, 'e'},
{"resume", required_argument, NULL, 't'},
{"compressed", no_argument, NULL, 'c'},
{"raw", no_argument, NULL, 'w'},
{"backup", no_argument, NULL, 'b'},
{"holds", no_argument, NULL, 'h'},
{"saved", no_argument, NULL, 'S'},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":i:I:RsDpvnPLeht:cwbd:S",
long_options, NULL)) != -1) {
switch (c) {
case 'i':
if (fromname)
usage(B_FALSE);
fromname = optarg;
break;
case 'I':
if (fromname)
usage(B_FALSE);
fromname = optarg;
flags.doall = B_TRUE;
break;
case 'R':
flags.replicate = B_TRUE;
break;
case 's':
flags.skipmissing = B_TRUE;
break;
case 'd':
redactbook = optarg;
break;
case 'p':
flags.props = B_TRUE;
break;
case 'b':
flags.backup = B_TRUE;
break;
case 'h':
flags.holds = B_TRUE;
break;
case 'P':
flags.parsable = B_TRUE;
break;
case 'v':
flags.verbosity++;
flags.progress = B_TRUE;
break;
case 'D':
(void) fprintf(stderr,
gettext("WARNING: deduplicated send is no "
"longer supported. A regular,\n"
"non-deduplicated stream will be generated.\n\n"));
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'L':
flags.largeblock = B_TRUE;
break;
case 'e':
flags.embed_data = B_TRUE;
break;
case 't':
resume_token = optarg;
break;
case 'c':
flags.compress = B_TRUE;
break;
case 'w':
flags.raw = B_TRUE;
flags.compress = B_TRUE;
flags.embed_data = B_TRUE;
flags.largeblock = B_TRUE;
break;
case 'S':
flags.saved = B_TRUE;
break;
case ':':
/*
* If a parameter was not passed, optopt contains the
* value that would normally lead us into the
* appropriate case statement. If it's greater than
* UINT8_MAX, then this must be a longopt and we should
* look at argv to get the string. Otherwise it's just the
* character, so we should use it directly.
*/
if (optopt <= UINT8_MAX) {
(void) fprintf(stderr,
gettext("missing argument for '%c' "
"option\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("missing argument for '%s' "
"option\n"), argv[optind - 1]);
}
usage(B_FALSE);
break;
case '?':
default:
/*
* If an invalid flag was passed, optopt contains the
* character if it was a short flag, or 0 if it was a
* longopt.
*/
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
if (flags.parsable && flags.verbosity == 0)
flags.verbosity = 1;
argc -= optind;
argv += optind;
if (resume_token != NULL) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.backup || flags.holds ||
flags.saved || redactbook != NULL) {
(void) fprintf(stderr,
gettext("invalid flags combined with -t\n"));
usage(B_FALSE);
}
if (argc > 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc < 1) {
(void) fprintf(stderr,
gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
if (flags.saved) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.doall || flags.backup ||
flags.holds || flags.largeblock || flags.embed_data ||
flags.compress || flags.raw || redactbook != NULL) {
(void) fprintf(stderr, gettext("incompatible flags "
"combined with saved send flag\n"));
usage(B_FALSE);
}
if (strchr(argv[0], '@') != NULL) {
(void) fprintf(stderr, gettext("saved send must "
"specify the dataset with partially-received "
"state\n"));
usage(B_FALSE);
}
}
if (flags.raw && redactbook != NULL) {
(void) fprintf(stderr,
gettext("Error: raw sends may not be redacted.\n"));
return (1);
}
if (!flags.dryrun && isatty(STDOUT_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Stream can not be written to a terminal.\n"
"You must redirect standard output.\n"));
return (1);
}
if (flags.saved) {
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL)
return (1);
err = zfs_send_saved(zhp, &flags, STDOUT_FILENO,
resume_token);
if (err != 0)
note_dev_error(errno, STDOUT_FILENO);
zfs_close(zhp);
return (err != 0);
} else if (resume_token != NULL) {
err = zfs_send_resume(g_zfs, &flags, STDOUT_FILENO,
resume_token);
if (err != 0)
note_dev_error(errno, STDOUT_FILENO);
return (err);
}
if (flags.skipmissing && !flags.replicate) {
(void) fprintf(stderr,
gettext("skip-missing flag can only be used in "
"conjunction with replicate\n"));
usage(B_FALSE);
}
/*
* For everything except -R and -I, use the new, cleaner code path.
*/
if (!(flags.replicate || flags.doall)) {
char frombuf[ZFS_MAX_DATASET_NAME_LEN];
if (fromname != NULL && (strchr(fromname, '#') == NULL &&
strchr(fromname, '@') == NULL)) {
/*
* Neither a bookmark nor a snapshot was specified. Print a
* warning, and assume snapshot.
*/
(void) fprintf(stderr, "Warning: incremental source "
"didn't specify type, assuming snapshot. Use '@' "
"or '#' prefix to avoid ambiguity.\n");
(void) snprintf(frombuf, sizeof (frombuf), "@%s",
fromname);
fromname = frombuf;
}
if (fromname != NULL &&
(fromname[0] == '#' || fromname[0] == '@')) {
/*
* Incremental source name begins with # or @.
* Default to same fs as target.
*/
char tmpbuf[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(tmpbuf, fromname, sizeof (tmpbuf));
(void) strlcpy(frombuf, argv[0], sizeof (frombuf));
cp = strchr(frombuf, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(frombuf, tmpbuf, sizeof (frombuf));
fromname = frombuf;
}
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL)
return (1);
err = zfs_send_one(zhp, fromname, STDOUT_FILENO, &flags,
redactbook);
zfs_close(zhp);
if (err != 0)
note_dev_error(errno, STDOUT_FILENO);
return (err != 0);
}
if (fromname != NULL && strchr(fromname, '#')) {
(void) fprintf(stderr,
gettext("Error: multiple snapshots cannot be "
"sent from a bookmark.\n"));
return (1);
}
if (redactbook != NULL) {
(void) fprintf(stderr, gettext("Error: multiple snapshots "
"cannot be sent redacted.\n"));
return (1);
}
if ((cp = strchr(argv[0], '@')) == NULL) {
(void) fprintf(stderr, gettext("Error: "
"Unsupported flag with filesystem or bookmark.\n"));
return (1);
}
*cp = '\0';
toname = cp + 1;
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
/*
* If they specified the full path to the snapshot, chop off
* everything except the short name of the snapshot, but special
* case if they specify the origin.
*/
if (fromname && (cp = strchr(fromname, '@')) != NULL) {
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
(void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN,
origin, sizeof (origin), &src, NULL, 0, B_FALSE);
if (strcmp(origin, fromname) == 0) {
fromname = NULL;
flags.fromorigin = B_TRUE;
} else {
*cp = '\0';
if (cp != fromname && strcmp(argv[0], fromname)) {
(void) fprintf(stderr,
gettext("incremental source must be "
"in same filesystem\n"));
usage(B_FALSE);
}
fromname = cp + 1;
if (strchr(fromname, '@') || strchr(fromname, '/')) {
(void) fprintf(stderr,
gettext("invalid incremental source\n"));
usage(B_FALSE);
}
}
}
if (flags.replicate && fromname == NULL)
flags.doall = B_TRUE;
err = zfs_send(zhp, fromname, toname, &flags, STDOUT_FILENO, NULL, 0,
flags.verbosity >= 3 ? &dbgnv : NULL);
if (flags.verbosity >= 3 && dbgnv != NULL) {
/*
* dump_nvlist prints to stdout, but that's been
* redirected to a file. Make it print to stderr
* instead.
*/
(void) dup2(STDERR_FILENO, STDOUT_FILENO);
dump_nvlist(dbgnv, 0);
nvlist_free(dbgnv);
}
zfs_close(zhp);
note_dev_error(errno, STDOUT_FILENO);
return (err != 0);
}
/*
* Restore a backup stream from stdin.
*/
static int
zfs_do_receive(int argc, char **argv)
{
int c, err = 0;
recvflags_t flags = { 0 };
boolean_t abort_resumable = B_FALSE;
nvlist_t *props;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":o:x:dehMnuvFsA")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'x':
if (!parsepropname(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'd':
if (flags.istail) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.isprefix = B_TRUE;
break;
case 'e':
if (flags.isprefix) {
(void) fprintf(stderr, gettext("invalid option "
"combination: -d and -e are mutually "
"exclusive\n"));
usage(B_FALSE);
}
flags.istail = B_TRUE;
break;
case 'h':
flags.skipholds = B_TRUE;
break;
case 'M':
flags.forceunmount = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'u':
flags.nomount = B_TRUE;
break;
case 'v':
flags.verbose = B_TRUE;
break;
case 's':
flags.resumable = B_TRUE;
break;
case 'F':
flags.force = B_TRUE;
break;
case 'A':
abort_resumable = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* zfs recv -e (use "tail" name) implies -d (remove dataset "head") */
if (flags.istail)
flags.isprefix = B_TRUE;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (abort_resumable) {
if (flags.isprefix || flags.istail || flags.dryrun ||
flags.resumable || flags.nomount) {
(void) fprintf(stderr, gettext("invalid option\n"));
usage(B_FALSE);
}
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(namebuf, sizeof (namebuf),
"%s/%%recv", argv[0]);
if (zfs_dataset_exists(g_zfs, namebuf,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) {
zfs_handle_t *zhp = zfs_open(g_zfs,
namebuf, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(props);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
} else {
zfs_handle_t *zhp = zfs_open(g_zfs,
argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (!zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) ||
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == -1) {
(void) fprintf(stderr,
gettext("'%s' does not have any "
"resumable receive state to abort\n"),
argv[0]);
nvlist_free(props);
zfs_close(zhp);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
}
nvlist_free(props);
return (err != 0);
}
if (isatty(STDIN_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Backup stream can not be read "
"from a terminal.\n"
"You must redirect standard input.\n"));
nvlist_free(props);
return (1);
}
err = zfs_receive(g_zfs, argv[0], props, &flags, STDIN_FILENO, NULL);
nvlist_free(props);
return (err != 0);
}
/*
* allow/unallow stuff
*/
/* copied from zfs/sys/dsl_deleg.h */
#define ZFS_DELEG_PERM_CREATE "create"
#define ZFS_DELEG_PERM_DESTROY "destroy"
#define ZFS_DELEG_PERM_SNAPSHOT "snapshot"
#define ZFS_DELEG_PERM_ROLLBACK "rollback"
#define ZFS_DELEG_PERM_CLONE "clone"
#define ZFS_DELEG_PERM_PROMOTE "promote"
#define ZFS_DELEG_PERM_RENAME "rename"
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_ALLOW "allow"
#define ZFS_DELEG_PERM_USERPROP "userprop"
#define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? */
#define ZFS_DELEG_PERM_USERQUOTA "userquota"
#define ZFS_DELEG_PERM_GROUPQUOTA "groupquota"
#define ZFS_DELEG_PERM_USERUSED "userused"
#define ZFS_DELEG_PERM_GROUPUSED "groupused"
#define ZFS_DELEG_PERM_USEROBJQUOTA "userobjquota"
#define ZFS_DELEG_PERM_GROUPOBJQUOTA "groupobjquota"
#define ZFS_DELEG_PERM_USEROBJUSED "userobjused"
#define ZFS_DELEG_PERM_GROUPOBJUSED "groupobjused"
#define ZFS_DELEG_PERM_HOLD "hold"
#define ZFS_DELEG_PERM_RELEASE "release"
#define ZFS_DELEG_PERM_DIFF "diff"
#define ZFS_DELEG_PERM_BOOKMARK "bookmark"
#define ZFS_DELEG_PERM_LOAD_KEY "load-key"
#define ZFS_DELEG_PERM_CHANGE_KEY "change-key"
#define ZFS_DELEG_PERM_PROJECTUSED "projectused"
#define ZFS_DELEG_PERM_PROJECTQUOTA "projectquota"
#define ZFS_DELEG_PERM_PROJECTOBJUSED "projectobjused"
#define ZFS_DELEG_PERM_PROJECTOBJQUOTA "projectobjquota"
#define ZFS_NUM_DELEG_NOTES ZFS_DELEG_NOTE_NONE
static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
{ ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW },
{ ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE },
{ ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE },
{ ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY },
{ ZFS_DELEG_PERM_DIFF, ZFS_DELEG_NOTE_DIFF},
{ ZFS_DELEG_PERM_HOLD, ZFS_DELEG_NOTE_HOLD },
{ ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT },
{ ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE },
{ ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE },
{ ZFS_DELEG_PERM_RELEASE, ZFS_DELEG_NOTE_RELEASE },
{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
{ ZFS_DELEG_PERM_BOOKMARK, ZFS_DELEG_NOTE_BOOKMARK },
{ ZFS_DELEG_PERM_LOAD_KEY, ZFS_DELEG_NOTE_LOAD_KEY },
{ ZFS_DELEG_PERM_CHANGE_KEY, ZFS_DELEG_NOTE_CHANGE_KEY },
{ ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_NOTE_GROUPQUOTA },
{ ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_NOTE_GROUPUSED },
{ ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP },
{ ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_NOTE_USERQUOTA },
{ ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_NOTE_USERUSED },
{ ZFS_DELEG_PERM_USEROBJQUOTA, ZFS_DELEG_NOTE_USEROBJQUOTA },
{ ZFS_DELEG_PERM_USEROBJUSED, ZFS_DELEG_NOTE_USEROBJUSED },
{ ZFS_DELEG_PERM_GROUPOBJQUOTA, ZFS_DELEG_NOTE_GROUPOBJQUOTA },
{ ZFS_DELEG_PERM_GROUPOBJUSED, ZFS_DELEG_NOTE_GROUPOBJUSED },
{ ZFS_DELEG_PERM_PROJECTUSED, ZFS_DELEG_NOTE_PROJECTUSED },
{ ZFS_DELEG_PERM_PROJECTQUOTA, ZFS_DELEG_NOTE_PROJECTQUOTA },
{ ZFS_DELEG_PERM_PROJECTOBJUSED, ZFS_DELEG_NOTE_PROJECTOBJUSED },
{ ZFS_DELEG_PERM_PROJECTOBJQUOTA, ZFS_DELEG_NOTE_PROJECTOBJQUOTA },
{ NULL, ZFS_DELEG_NOTE_NONE }
};
/* permission structure */
typedef struct deleg_perm {
zfs_deleg_who_type_t dp_who_type;
const char *dp_name;
boolean_t dp_local;
boolean_t dp_descend;
} deleg_perm_t;
/* AVL node wrapping a single delegated permission */
typedef struct deleg_perm_node {
deleg_perm_t dpn_perm;
uu_avl_node_t dpn_avl_node;
} deleg_perm_node_t;
typedef struct fs_perm fs_perm_t;
/* permissions set */
typedef struct who_perm {
zfs_deleg_who_type_t who_type;
const char *who_name; /* id */
char who_ug_name[256]; /* user/group name */
fs_perm_t *who_fsperm; /* uplink */
uu_avl_t *who_deleg_perm_avl; /* permissions */
} who_perm_t;
/* AVL node wrapping a single who entry (user, group, set, ...) */
typedef struct who_perm_node {
who_perm_t who_perm;
uu_avl_node_t who_avl_node;
} who_perm_node_t;
typedef struct fs_perm_set fs_perm_set_t;
/* fs permissions */
struct fs_perm {
const char *fsp_name;
uu_avl_t *fsp_sc_avl; /* sets,create */
uu_avl_t *fsp_uge_avl; /* user,group,everyone */
fs_perm_set_t *fsp_set; /* uplink */
};
/* list node wrapping the permissions of a single file system */
typedef struct fs_perm_node {
fs_perm_t fspn_fsperm;
uu_avl_t *fspn_avl;
uu_list_node_t fspn_list_node;
} fs_perm_node_t;
/* top level structure */
struct fs_perm_set {
uu_list_pool_t *fsps_list_pool;
uu_list_t *fsps_list; /* list of fs_perms */
uu_avl_pool_t *fsps_named_set_avl_pool;
uu_avl_pool_t *fsps_who_perm_avl_pool;
uu_avl_pool_t *fsps_deleg_perm_avl_pool;
};
static inline const char *
deleg_perm_type(zfs_deleg_note_t note)
{
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
case ZFS_DELEG_NOTE_GROUPUSED:
case ZFS_DELEG_NOTE_USERPROP:
case ZFS_DELEG_NOTE_USERQUOTA:
case ZFS_DELEG_NOTE_USERUSED:
case ZFS_DELEG_NOTE_USEROBJQUOTA:
case ZFS_DELEG_NOTE_USEROBJUSED:
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
case ZFS_DELEG_NOTE_GROUPOBJUSED:
case ZFS_DELEG_NOTE_PROJECTUSED:
case ZFS_DELEG_NOTE_PROJECTQUOTA:
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
/* other */
return (gettext("other"));
default:
return (gettext("subcommand"));
}
}
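/*
* Return a sort weight for a delegation "who" type so that named sets sort
* first, followed by create-time, user, group, and everyone entries.
*/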
static int
who_type2weight(zfs_deleg_who_type_t who_type)
{
int res;
switch (who_type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
res = 0;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
res = 1;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
res = 2;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
res = 3;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
res = 4;
break;
default:
res = -1;
}
return (res);
}
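/*
* AVL comparator for who entries: order by who-type weight, then by name.
*/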
/* ARGSUSED */
static int
who_perm_compare(const void *larg, const void *rarg, void *unused)
{
const who_perm_node_t *l = larg;
const who_perm_node_t *r = rarg;
zfs_deleg_who_type_t ltype = l->who_perm.who_type;
zfs_deleg_who_type_t rtype = r->who_perm.who_type;
int lweight = who_type2weight(ltype);
int rweight = who_type2weight(rtype);
int res = lweight - rweight;
if (res == 0)
res = strncmp(l->who_perm.who_name, r->who_perm.who_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
/* ARGSUSED */
static int
deleg_perm_compare(const void *larg, const void *rarg, void *unused)
{
const deleg_perm_node_t *l = larg;
const deleg_perm_node_t *r = rarg;
int res = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
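/*
* Initialize a fs_perm_set_t: create the list of per-filesystem permissions
* and the AVL pools used for named sets, who entries, and individual
* delegated permissions. Exits via nomem() on allocation failure.
*/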
static inline void
fs_perm_set_init(fs_perm_set_t *fspset)
{
bzero(fspset, sizeof (fs_perm_set_t));
if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
NULL, UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create(
"named_set_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create(
"who_perm_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create(
"deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof(
deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT))
== NULL)
nomem();
}
static inline void fs_perm_fini(fs_perm_t *);
static inline void who_perm_fini(who_perm_t *);
static inline void
fs_perm_set_fini(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = uu_list_first(fspset->fsps_list);
while (node != NULL) {
fs_perm_node_t *next_node =
uu_list_next(fspset->fsps_list, node);
fs_perm_t *fsperm = &node->fspn_fsperm;
fs_perm_fini(fsperm);
uu_list_remove(fspset->fsps_list, node);
free(node);
node = next_node;
}
uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool);
uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool);
uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool);
}
static inline void
deleg_perm_init(deleg_perm_t *deleg_perm, zfs_deleg_who_type_t type,
const char *name)
{
deleg_perm->dp_who_type = type;
deleg_perm->dp_name = name;
}
static inline void
who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
zfs_deleg_who_type_t type, const char *name)
{
uu_avl_pool_t *pool;
pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
bzero(who_perm, sizeof (who_perm_t));
if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
who_perm->who_type = type;
who_perm->who_name = name;
who_perm->who_fsperm = fsperm;
}
static inline void
who_perm_fini(who_perm_t *who_perm)
{
deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl);
while (node != NULL) {
deleg_perm_node_t *next_node =
uu_avl_next(who_perm->who_deleg_perm_avl, node);
uu_avl_remove(who_perm->who_deleg_perm_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(who_perm->who_deleg_perm_avl);
}
static inline void
fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
{
uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool;
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool;
bzero(fsperm, sizeof (fs_perm_t));
if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
fsperm->fsp_set = fspset;
fsperm->fsp_name = fsname;
}
static inline void
fs_perm_fini(fs_perm_t *fsperm)
{
who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_sc_avl, node);
free(node);
node = next_node;
}
node = uu_avl_first(fsperm->fsp_uge_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_uge_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(fsperm->fsp_sc_avl);
uu_avl_destroy(fsperm->fsp_uge_avl);
}
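/*
* Insert (or look up) the delegated permission 'name' in the given AVL and
* mark it local and/or descendent according to the locality character.
*/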
static void
set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node,
zfs_deleg_who_type_t who_type, const char *name, char locality)
{
uu_avl_index_t idx = 0;
deleg_perm_node_t *found_node = NULL;
deleg_perm_t *deleg_perm = &node->dpn_perm;
deleg_perm_init(deleg_perm, who_type, name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL)
uu_avl_insert(avl, node, idx);
else {
node = found_node;
deleg_perm = &node->dpn_perm;
}
switch (locality) {
case ZFS_DELEG_LOCAL:
deleg_perm->dp_local = B_TRUE;
break;
case ZFS_DELEG_DESCENDENT:
deleg_perm->dp_descend = B_TRUE;
break;
case ZFS_DELEG_NA:
break;
default:
assert(B_FALSE); /* invalid locality */
}
}
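/*
* Add every permission name in 'nvl' (one boolean nvpair per permission) to
* the who entry's AVL, tagged with the given locality.
*/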
static inline int
parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set;
uu_avl_t *avl = who_perm->who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_perm->who_type;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool;
deleg_perm_node_t *node =
safe_malloc(sizeof (deleg_perm_node_t));
VERIFY(type == DATA_TYPE_BOOLEAN);
uu_avl_node_init(node, &node->dpn_avl_node, avl_pool);
set_deleg_perm_node(avl, node, who_type, name, locality);
}
return (0);
}
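/*
* Parse the permissions of one file system. Each nvpair name is encoded as
* "<who type><locality>$<who name>" and its value is an nvlist of permission
* names; entries are filed into the sets/create AVL or the user/group/
* everyone AVL, resolving numeric user and group IDs to names for display.
*/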
static inline int
parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = fsperm->fsp_set;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *name = nvpair_name(nvp);
uu_avl_t *avl = NULL;
uu_avl_pool_t *avl_pool = NULL;
zfs_deleg_who_type_t perm_type = name[0];
char perm_locality = name[1];
const char *perm_name = name + 3;
who_perm_t *who_perm = NULL;
assert('$' == name[2]);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
switch (perm_type) {
case ZFS_DELEG_CREATE:
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_NAMED_SET:
case ZFS_DELEG_NAMED_SET_SETS:
avl_pool = fspset->fsps_named_set_avl_pool;
avl = fsperm->fsp_sc_avl;
break;
case ZFS_DELEG_USER:
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_GROUP:
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_EVERYONE:
case ZFS_DELEG_EVERYONE_SETS:
avl_pool = fspset->fsps_who_perm_avl_pool;
avl = fsperm->fsp_uge_avl;
break;
default:
assert(!"unhandled zfs_deleg_who_type_t");
}
who_perm_node_t *found_node = NULL;
who_perm_node_t *node = safe_malloc(
sizeof (who_perm_node_t));
who_perm = &node->who_perm;
uu_avl_index_t idx = 0;
uu_avl_node_init(node, &node->who_avl_node, avl_pool);
who_perm_init(who_perm, fsperm, perm_type, perm_name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL) {
if (avl == fsperm->fsp_uge_avl) {
uid_t rid = 0;
struct passwd *p = NULL;
struct group *g = NULL;
const char *nice_name = NULL;
switch (perm_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
rid = atoi(perm_name);
p = getpwuid(rid);
if (p)
nice_name = p->pw_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
rid = atoi(perm_name);
g = getgrgid(rid);
if (g)
nice_name = g->gr_name;
break;
default:
break;
}
if (nice_name != NULL) {
(void) strlcpy(
node->who_perm.who_ug_name,
nice_name, 256);
} else {
/* User or group unknown */
(void) snprintf(
node->who_perm.who_ug_name,
sizeof (node->who_perm.who_ug_name),
"(unknown: %d)", rid);
}
}
uu_avl_insert(avl, node, idx);
} else {
node = found_node;
who_perm = &node->who_perm;
}
assert(who_perm != NULL);
(void) parse_who_perm(who_perm, nvl2, perm_locality);
}
return (0);
}
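/*
* Parse the full fsacl nvlist: one nvpair per file system, whose value is
* that file system's permissions nvlist. Appends an fs_perm node per file
* system to the set's list.
*/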
static inline int
parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
uu_avl_index_t idx = 0;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *fsname = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
fs_perm_t *fsperm = NULL;
fs_perm_node_t *node = safe_malloc(sizeof (fs_perm_node_t));
if (node == NULL)
nomem();
fsperm = &node->fspn_fsperm;
VERIFY(DATA_TYPE_NVLIST == type);
uu_list_node_init(node, &node->fspn_list_node,
fspset->fsps_list_pool);
idx = uu_list_numnodes(fspset->fsps_list);
fs_perm_init(fsperm, fspset, fsname);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
(void) parse_fs_perm(fsperm, nvl2);
uu_list_insert(fspset->fsps_list, node, idx);
}
return (0);
}
static inline const char *
deleg_perm_comment(zfs_deleg_note_t note)
{
const char *str = "";
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
case ZFS_DELEG_NOTE_ALLOW:
str = gettext("Must also have the permission that is being"
"\n\t\t\t\tallowed");
break;
case ZFS_DELEG_NOTE_CLONE:
str = gettext("Must also have the 'create' ability and 'mount'"
"\n\t\t\t\tability in the origin file system");
break;
case ZFS_DELEG_NOTE_CREATE:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DESTROY:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DIFF:
str = gettext("Allows lookup of paths within a dataset;"
"\n\t\t\t\tgiven an object number. Ordinary users need this"
"\n\t\t\t\tin order to use zfs diff");
break;
case ZFS_DELEG_NOTE_HOLD:
str = gettext("Allows adding a user hold to a snapshot");
break;
case ZFS_DELEG_NOTE_MOUNT:
str = gettext("Allows mount/umount of ZFS datasets");
break;
case ZFS_DELEG_NOTE_PROMOTE:
str = gettext("Must also have the 'mount'\n\t\t\t\tand"
" 'promote' ability in the origin file system");
break;
case ZFS_DELEG_NOTE_RECEIVE:
str = gettext("Must also have the 'mount' and 'create'"
" ability");
break;
case ZFS_DELEG_NOTE_RELEASE:
str = gettext("Allows releasing a user hold which\n\t\t\t\t"
"might destroy the snapshot");
break;
case ZFS_DELEG_NOTE_RENAME:
str = gettext("Must also have the 'mount' and 'create'"
"\n\t\t\t\tability in the new parent");
break;
case ZFS_DELEG_NOTE_ROLLBACK:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SEND:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SHARE:
str = gettext("Allows sharing file systems over NFS or SMB"
"\n\t\t\t\tprotocols");
break;
case ZFS_DELEG_NOTE_SNAPSHOT:
str = gettext("");
break;
case ZFS_DELEG_NOTE_LOAD_KEY:
str = gettext("Allows loading or unloading an encryption key");
break;
case ZFS_DELEG_NOTE_CHANGE_KEY:
str = gettext("Allows changing or adding an encryption key");
break;
/*
* case ZFS_DELEG_NOTE_VSCAN:
* str = gettext("");
* break;
*/
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
str = gettext("Allows accessing any groupquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPUSED:
str = gettext("Allows reading any groupused@... property");
break;
case ZFS_DELEG_NOTE_USERPROP:
str = gettext("Allows changing any user property");
break;
case ZFS_DELEG_NOTE_USERQUOTA:
str = gettext("Allows accessing any userquota@... property");
break;
case ZFS_DELEG_NOTE_USERUSED:
str = gettext("Allows reading any userused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJQUOTA:
str = gettext("Allows accessing any userobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"groupobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJUSED:
str = gettext("Allows reading any groupobjused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJUSED:
str = gettext("Allows reading any userobjused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTQUOTA:
str = gettext("Allows accessing any projectquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTUSED:
str = gettext("Allows reading any projectused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjused@... property");
break;
/* other */
default:
str = "";
}
return (str);
}
struct allow_opts {
boolean_t local;
boolean_t descend;
boolean_t user;
boolean_t group;
boolean_t everyone;
boolean_t create;
boolean_t set;
boolean_t recursive; /* unallow only */
boolean_t prt_usage;
boolean_t prt_perms;
char *who;
char *perms;
const char *dataset;
};
static inline int
prop_cmp(const void *a, const void *b)
{
const char *str1 = *(const char **)a;
const char *str2 = *(const char **)b;
return (strcmp(str1, str2));
}
static void
allow_usage(boolean_t un, boolean_t requested, const char *msg)
{
const char *opt_desc[] = {
"-h", gettext("show this help message and exit"),
"-l", gettext("set permission locally"),
"-d", gettext("set permission for descents"),
"-u", gettext("set permission for user"),
"-g", gettext("set permission for group"),
"-e", gettext("set permission for everyone"),
"-c", gettext("set create time permission"),
"-s", gettext("define permission set"),
/* unallow only */
"-r", gettext("remove permissions recursively"),
};
size_t unallow_size = sizeof (opt_desc) / sizeof (char *);
size_t allow_size = unallow_size - 2;
const char *props[ZFS_NUM_PROPS];
int i;
size_t count = 0;
FILE *fp = requested ? stdout : stderr;
zprop_desc_t *pdtbl = zfs_prop_get_table();
const char *fmt = gettext("%-16s %-14s\t%s\n");
(void) fprintf(fp, gettext("Usage: %s\n"), get_usage(un ? HELP_UNALLOW :
HELP_ALLOW));
(void) fprintf(fp, gettext("Options:\n"));
for (i = 0; i < (un ? unallow_size : allow_size); i += 2) {
const char *opt = opt_desc[i];
const char *optdsc = opt_desc[i + 1];
(void) fprintf(fp, gettext(" %-10s %s\n"), opt, optdsc);
}
(void) fprintf(fp, gettext("\nThe following permissions are "
"supported:\n\n"));
(void) fprintf(fp, fmt, gettext("NAME"), gettext("TYPE"),
gettext("NOTES"));
for (i = 0; i < ZFS_NUM_DELEG_NOTES; i++) {
const char *perm_name = zfs_deleg_perm_tbl[i].z_perm;
zfs_deleg_note_t perm_note = zfs_deleg_perm_tbl[i].z_note;
const char *perm_type = deleg_perm_type(perm_note);
const char *perm_comment = deleg_perm_comment(perm_note);
(void) fprintf(fp, fmt, perm_name, perm_type, perm_comment);
}
for (i = 0; i < ZFS_NUM_PROPS; i++) {
zprop_desc_t *pd = &pdtbl[i];
if (pd->pd_visible != B_TRUE)
continue;
if (pd->pd_attr == PROP_READONLY)
continue;
props[count++] = pd->pd_name;
}
props[count] = NULL;
qsort(props, count, sizeof (char *), prop_cmp);
for (i = 0; i < count; i++)
(void) fprintf(fp, fmt, props[i], gettext("property"), "");
if (msg != NULL)
(void) fprintf(fp, gettext("\nzfs: error: %s"), msg);
exit(requested ? 0 : 2);
}
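/*
* Pick the dataset name (the last argument) out of argv and, when present,
* the permission list (the second-to-last argument); for 'unallow' the
* permission list may be omitted, in which case *permsp is set to NULL.
*/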
static inline const char *
munge_args(int argc, char **argv, boolean_t un, size_t expected_argc,
char **permsp)
{
if (un && argc == expected_argc - 1)
*permsp = NULL;
else if (argc == expected_argc)
*permsp = argv[argc - 2];
else
allow_usage(un, B_FALSE,
gettext("wrong number of parameters\n"));
return (argv[argc - 1]);
}
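/*
* Validate the allow/unallow options and work out who the permissions apply
* to, which permissions are named, and which dataset is the target. With
* neither -l nor -d given, default to both local and descendent.
*/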
static void
parse_allow_args(int argc, char **argv, boolean_t un, struct allow_opts *opts)
{
int uge_sum = opts->user + opts->group + opts->everyone;
int csuge_sum = opts->create + opts->set + uge_sum;
int ldcsuge_sum = csuge_sum + opts->local + opts->descend;
int all_sum = un ? ldcsuge_sum + opts->recursive : ldcsuge_sum;
if (uge_sum > 1)
allow_usage(un, B_FALSE,
gettext("-u, -g, and -e are mutually exclusive\n"));
if (opts->prt_usage) {
if (argc == 0 && all_sum == 0)
allow_usage(un, B_TRUE, NULL);
else
usage(B_FALSE);
}
if (opts->set) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -s\n"));
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
if (argv[0][0] != '@')
allow_usage(un, B_FALSE,
gettext("invalid set name: missing '@' prefix\n"));
opts->who = argv[0];
} else if (opts->create) {
if (ldcsuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -c\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (opts->everyone) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -e\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (uge_sum == 0 && argc > 0 && strcmp(argv[0], "everyone")
== 0) {
opts->everyone = B_TRUE;
argc--;
argv++;
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (argc == 1 && !un) {
opts->prt_perms = B_TRUE;
opts->dataset = argv[argc-1];
} else {
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
opts->who = argv[0];
}
if (!opts->local && !opts->descend) {
opts->local = B_TRUE;
opts->descend = B_TRUE;
}
}
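/*
* Encode one allow/unallow entry into 'top_nvl'. Keys take the form
* "<who type><locality>$<who name>"; when a permission list is given each
* key maps to an nvlist of permission names (set names beginning with '@'
* are stored under the corresponding "sets" who type), otherwise a bare
* boolean key is added for each locality.
*/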
static void
store_allow_perm(zfs_deleg_who_type_t type, boolean_t local, boolean_t descend,
const char *who, char *perms, nvlist_t *top_nvl)
{
int i;
char ld[2] = { '\0', '\0' };
char who_buf[MAXNAMELEN + 32];
char base_type = '\0';
char set_type = '\0';
nvlist_t *base_nvl = NULL;
nvlist_t *set_nvl = NULL;
nvlist_t *nvl;
if (nvlist_alloc(&base_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&set_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
switch (type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
set_type = ZFS_DELEG_NAMED_SET_SETS;
base_type = ZFS_DELEG_NAMED_SET;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
set_type = ZFS_DELEG_CREATE_SETS;
base_type = ZFS_DELEG_CREATE;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
set_type = ZFS_DELEG_USER_SETS;
base_type = ZFS_DELEG_USER;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
set_type = ZFS_DELEG_GROUP_SETS;
base_type = ZFS_DELEG_GROUP;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
set_type = ZFS_DELEG_EVERYONE_SETS;
base_type = ZFS_DELEG_EVERYONE;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
default:
assert(set_type != '\0' && base_type != '\0');
}
if (perms != NULL) {
char *curr = perms;
char *end = curr + strlen(perms);
while (curr < end) {
char *delim = strchr(curr, ',');
if (delim == NULL)
delim = end;
else
*delim = '\0';
if (curr[0] == '@')
nvl = set_nvl;
else
nvl = base_nvl;
(void) nvlist_add_boolean(nvl, curr);
if (delim != end)
*delim = ',';
curr = delim + 1;
}
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (!nvlist_empty(base_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
base_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
base_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
base_nvl);
}
if (!nvlist_empty(set_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
set_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
set_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
set_nvl);
}
}
} else {
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", base_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", base_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", set_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", set_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
}
}
}
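/*
* Build the fsacl nvlist from the parsed options. For the plain user/group
* form the comma-separated who list is split and each entry is resolved to
* a numeric ID via the passwd/group databases before being stored.
*/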
static int
construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp)
{
if (nvlist_alloc(nvlp, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (opts->set) {
store_allow_perm(ZFS_DELEG_NAMED_SET, opts->local,
opts->descend, opts->who, opts->perms, *nvlp);
} else if (opts->create) {
store_allow_perm(ZFS_DELEG_CREATE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else if (opts->everyone) {
store_allow_perm(ZFS_DELEG_EVERYONE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else {
char *curr = opts->who;
char *end = curr + strlen(curr);
while (curr < end) {
const char *who;
zfs_deleg_who_type_t who_type = ZFS_DELEG_WHO_UNKNOWN;
char *endch;
char *delim = strchr(curr, ',');
char errbuf[256];
char id[64];
struct passwd *p = NULL;
struct group *g = NULL;
uid_t rid;
if (delim == NULL)
delim = end;
else
*delim = '\0';
rid = (uid_t)strtol(curr, &endch, 0);
if (opts->user) {
who_type = ZFS_DELEG_USER;
if (*endch != '\0')
p = getpwnam(curr);
else
p = getpwuid(rid);
if (p != NULL)
rid = p->pw_uid;
else if (*endch != '\0') {
(void) snprintf(errbuf, 256, gettext(
"invalid user %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else if (opts->group) {
who_type = ZFS_DELEG_GROUP;
if (*endch != '\0')
g = getgrnam(curr);
else
g = getgrgid(rid);
if (g != NULL)
rid = g->gr_gid;
else if (*endch != '\0') {
(void) snprintf(errbuf, 256, gettext(
"invalid group %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else {
if (*endch != '\0') {
p = getpwnam(curr);
} else {
p = getpwuid(rid);
}
if (p == NULL) {
if (*endch != '\0') {
g = getgrnam(curr);
} else {
g = getgrgid(rid);
}
}
if (p != NULL) {
who_type = ZFS_DELEG_USER;
rid = p->pw_uid;
} else if (g != NULL) {
who_type = ZFS_DELEG_GROUP;
rid = g->gr_gid;
} else {
(void) snprintf(errbuf, 256, gettext(
"invalid user/group %s\n"), curr);
allow_usage(un, B_TRUE, errbuf);
}
}
(void) sprintf(id, "%u", rid);
who = id;
store_allow_perm(who_type, opts->local,
opts->descend, who, opts->perms, *nvlp);
curr = delim + 1;
}
}
return (0);
}
static void
print_set_creat_perms(uu_avl_t *who_avl)
{
const char *sc_title[] = {
gettext("Permission sets:\n"),
gettext("Create time permissions:\n"),
NULL
};
who_perm_node_t *who_node = NULL;
int prev_weight = -1;
for (who_node = uu_avl_first(who_avl); who_node != NULL;
who_node = uu_avl_next(who_avl, who_node)) {
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
const char *who_name = who_node->who_perm.who_name;
int weight = who_type2weight(who_type);
boolean_t first = B_TRUE;
deleg_perm_node_t *deleg_node;
if (prev_weight != weight) {
(void) printf("%s", sc_title[weight]);
prev_weight = weight;
}
if (who_name == NULL || strnlen(who_name, 1) == 0)
(void) printf("\t");
else
(void) printf("\t%s ", who_name);
for (deleg_node = uu_avl_first(avl); deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (first) {
(void) printf("%s",
deleg_node->dpn_perm.dp_name);
first = B_FALSE;
} else
(void) printf(",%s",
deleg_node->dpn_perm.dp_name);
}
(void) printf("\n");
}
}
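/*
* Print the user/group/everyone entries whose permissions match the
* requested local/descendent combination, preceded by 'title' (printed once,
* and only if at least one entry matches).
*/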
static void
print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
const char *title)
{
who_perm_node_t *who_node = NULL;
boolean_t prt_title = B_TRUE;
uu_avl_walk_t *walk;
if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((who_node = uu_avl_walk_next(walk)) != NULL) {
const char *who_name = who_node->who_perm.who_name;
const char *nice_who_name = who_node->who_perm.who_ug_name;
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
char delim = ' ';
deleg_perm_node_t *deleg_node;
boolean_t prt_who = B_TRUE;
for (deleg_node = uu_avl_first(avl);
deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (local != deleg_node->dpn_perm.dp_local ||
descend != deleg_node->dpn_perm.dp_descend)
continue;
if (prt_who) {
const char *who = NULL;
if (prt_title) {
prt_title = B_FALSE;
(void) printf("%s", title);
}
switch (who_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
who = gettext("user");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
who = gettext("group");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
who = gettext("everyone");
who_name = NULL;
break;
default:
assert(who != NULL);
}
prt_who = B_FALSE;
if (who_name == NULL)
(void) printf("\t%s", who);
else
(void) printf("\t%s %s", who, who_name);
}
(void) printf("%c%s", delim,
deleg_node->dpn_perm.dp_name);
delim = ',';
}
if (!prt_who)
(void) printf("\n");
}
uu_avl_walk_end(walk);
}
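/*
* Print the delegated permissions for every file system in the set: a banner
* line per dataset followed by permission sets, create-time permissions, and
* the local, descendent, and local+descendent sections.
*/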
static void
print_fs_perms(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = NULL;
char buf[MAXNAMELEN + 32];
const char *dsname = buf;
for (node = uu_list_first(fspset->fsps_list); node != NULL;
node = uu_list_next(fspset->fsps_list, node)) {
uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl;
uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl;
int left = 0;
(void) snprintf(buf, sizeof (buf),
gettext("---- Permissions on %s "),
node->fspn_fsperm.fsp_name);
(void) printf("%s", dsname);
left = 70 - strlen(buf);
while (left-- > 0)
(void) printf("-");
(void) printf("\n");
print_set_creat_perms(sc_avl);
print_uge_deleg_perms(uge_avl, B_TRUE, B_FALSE,
gettext("Local permissions:\n"));
print_uge_deleg_perms(uge_avl, B_FALSE, B_TRUE,
gettext("Descendent permissions:\n"));
print_uge_deleg_perms(uge_avl, B_TRUE, B_TRUE,
gettext("Local+Descendent permissions:\n"));
}
}
static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL };
struct deleg_perms {
boolean_t un;
nvlist_t *nvl;
};
static int
set_deleg_perms(zfs_handle_t *zhp, void *data)
{
struct deleg_perms *perms = (struct deleg_perms *)data;
zfs_type_t zfs_type = zfs_get_type(zhp);
if (zfs_type != ZFS_TYPE_FILESYSTEM && zfs_type != ZFS_TYPE_VOLUME)
return (0);
return (zfs_set_fsacl(zhp, perms->un, perms->nvl));
}
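/*
* Common implementation of 'zfs allow' and 'zfs unallow'. With only a
* dataset argument (allow), the existing delegated permissions are printed;
* otherwise the requested changes are applied with zfs_set_fsacl(), and
* 'zfs unallow -r' also walks descendent filesystems.
*/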
static int
zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un)
{
zfs_handle_t *zhp;
nvlist_t *perm_nvl = NULL;
nvlist_t *update_perm_nvl = NULL;
int error = 1;
int c;
struct allow_opts opts = { 0 };
const char *optstr = un ? "ldugecsrh" : "ldugecsh";
/* check opts */
while ((c = getopt(argc, argv, optstr)) != -1) {
switch (c) {
case 'l':
opts.local = B_TRUE;
break;
case 'd':
opts.descend = B_TRUE;
break;
case 'u':
opts.user = B_TRUE;
break;
case 'g':
opts.group = B_TRUE;
break;
case 'e':
opts.everyone = B_TRUE;
break;
case 's':
opts.set = B_TRUE;
break;
case 'c':
opts.create = B_TRUE;
break;
case 'r':
opts.recursive = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'h':
opts.prt_usage = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
parse_allow_args(argc, argv, un, &opts);
/* try to open the dataset */
if ((zhp = zfs_open(g_zfs, opts.dataset, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
(void) fprintf(stderr, "Failed to open dataset: %s\n",
opts.dataset);
return (-1);
}
if (zfs_get_fsacl(zhp, &perm_nvl) != 0)
goto cleanup2;
fs_perm_set_init(&fs_perm_set);
if (parse_fs_perm_set(&fs_perm_set, perm_nvl) != 0) {
(void) fprintf(stderr, "Failed to parse fsacl permissions\n");
goto cleanup1;
}
if (opts.prt_perms)
print_fs_perms(&fs_perm_set);
else {
(void) construct_fsacl_list(un, &opts, &update_perm_nvl);
if (zfs_set_fsacl(zhp, un, update_perm_nvl) != 0)
goto cleanup0;
if (un && opts.recursive) {
struct deleg_perms data = { un, update_perm_nvl };
if (zfs_iter_filesystems(zhp, set_deleg_perms,
&data) != 0)
goto cleanup0;
}
}
error = 0;
cleanup0:
nvlist_free(perm_nvl);
nvlist_free(update_perm_nvl);
cleanup1:
fs_perm_set_fini(&fs_perm_set);
cleanup2:
zfs_close(zhp);
return (error);
}
static int
zfs_do_allow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_FALSE));
}
static int
zfs_do_unallow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_TRUE));
}
static int
zfs_do_hold_rele_impl(int argc, char **argv, boolean_t holding)
{
int errors = 0;
int i;
const char *tag;
boolean_t recursive = B_FALSE;
const char *opts = holding ? "rt" : "r";
int c;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 2)
usage(B_FALSE);
tag = argv[0];
--argc;
++argv;
if (holding && tag[0] == '.') {
/* tags starting with '.' are reserved for libzfs */
(void) fprintf(stderr, gettext("tag may not start with '.'\n"));
usage(B_FALSE);
}
for (i = 0; i < argc; ++i) {
zfs_handle_t *zhp;
char parent[ZFS_MAX_DATASET_NAME_LEN];
const char *delim;
char *path = argv[i];
delim = strchr(path, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), path);
++errors;
continue;
}
(void) strncpy(parent, path, delim - path);
parent[delim - path] = '\0';
zhp = zfs_open(g_zfs, parent,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
++errors;
continue;
}
if (holding) {
if (zfs_hold(zhp, delim+1, tag, recursive, -1) != 0)
++errors;
} else {
if (zfs_release(zhp, delim+1, tag, recursive) != 0)
++errors;
}
zfs_close(zhp);
}
return (errors != 0);
}
/*
* zfs hold [-r] [-t] <tag> <snap> ...
*
* -r Recursively hold
*
* Apply a user-hold with the given tag to the list of snapshots.
*/
static int
zfs_do_hold(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_TRUE));
}
/*
* zfs release [-r] <tag> <snap> ...
*
* -r Recursively release
*
* Release a user-hold with the given tag from the list of snapshots.
*/
static int
zfs_do_release(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_FALSE));
}
typedef struct holds_cbdata {
boolean_t cb_recursive;
const char *cb_snapname;
nvlist_t **cb_nvlp;
size_t cb_max_namelen;
size_t cb_max_taglen;
} holds_cbdata_t;
#define STRFTIME_FMT_STR "%a %b %e %H:%M %Y"
#define DATETIME_BUF_LEN (32)
/*
* Print the holds in 'nvl': tab-separated in scripted mode, otherwise as
* aligned NAME/TAG/TIMESTAMP columns using the supplied column widths.
*/
static void
print_holds(boolean_t scripted, int nwidth, int tagwidth, nvlist_t *nvl)
{
int i;
nvpair_t *nvp = NULL;
char *hdr_cols[] = { "NAME", "TAG", "TIMESTAMP" };
const char *col;
if (!scripted) {
for (i = 0; i < 3; i++) {
col = gettext(hdr_cols[i]);
if (i < 2)
(void) printf("%-*s ", i ? tagwidth : nwidth,
col);
else
(void) printf("%s\n", col);
}
}
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
char *zname = nvpair_name(nvp);
nvlist_t *nvl2;
nvpair_t *nvp2 = NULL;
(void) nvpair_value_nvlist(nvp, &nvl2);
while ((nvp2 = nvlist_next_nvpair(nvl2, nvp2)) != NULL) {
char tsbuf[DATETIME_BUF_LEN];
char *tagname = nvpair_name(nvp2);
uint64_t val = 0;
time_t time;
struct tm t;
(void) nvpair_value_uint64(nvp2, &val);
time = (time_t)val;
(void) localtime_r(&time, &t);
(void) strftime(tsbuf, DATETIME_BUF_LEN,
gettext(STRFTIME_FMT_STR), &t);
if (scripted) {
(void) printf("%s\t%s\t%s\n", zname,
tagname, tsbuf);
} else {
(void) printf("%-*s %-*s %s\n", nwidth,
zname, tagwidth, tagname, tsbuf);
}
}
}
}
/*
* Collect the user holds on one snapshot into the shared nvlist (keyed by
* the snapshot name) and track the widest dataset name and hold tag so the
* output columns can be sized.
*/
static int
holds_callback(zfs_handle_t *zhp, void *data)
{
holds_cbdata_t *cbp = data;
nvlist_t *top_nvl = *cbp->cb_nvlp;
nvlist_t *nvl = NULL;
nvpair_t *nvp = NULL;
const char *zname = zfs_get_name(zhp);
size_t znamelen = strlen(zname);
if (cbp->cb_recursive) {
const char *snapname;
char *delim = strchr(zname, '@');
if (delim == NULL)
return (0);
snapname = delim + 1;
if (strcmp(cbp->cb_snapname, snapname))
return (0);
}
if (zfs_get_holds(zhp, &nvl) != 0)
return (-1);
if (znamelen > cbp->cb_max_namelen)
cbp->cb_max_namelen = znamelen;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *tag = nvpair_name(nvp);
size_t taglen = strlen(tag);
if (taglen > cbp->cb_max_taglen)
cbp->cb_max_taglen = taglen;
}
return (nvlist_add_nvlist(top_nvl, zname, nvl));
}
/*
* zfs holds [-rH] <snap> ...
*
* -r Lists holds that are set on the named snapshots recursively.
* -H Scripted mode; elide headers and separate columns by tabs.
*/
static int
zfs_do_holds(int argc, char **argv)
{
int errors = 0;
int c;
int i;
boolean_t scripted = B_FALSE;
boolean_t recursive = B_FALSE;
const char *opts = "rH";
nvlist_t *nvl;
int types = ZFS_TYPE_SNAPSHOT;
holds_cbdata_t cb = { 0 };
int limit = 0;
int ret = 0;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (recursive) {
types |= ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
flags |= ZFS_ITER_RECURSE;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1)
usage(B_FALSE);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 0; i < argc; ++i) {
char *snapshot = argv[i];
const char *delim;
const char *snapname;
delim = strchr(snapshot, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), snapshot);
++errors;
continue;
}
snapname = delim + 1;
if (recursive)
snapshot[delim - snapshot] = '\0';
cb.cb_recursive = recursive;
cb.cb_snapname = snapname;
cb.cb_nvlp = &nvl;
/*
* 1. collect holds data, set format options
*/
ret = zfs_for_each(argc, argv, flags, types, NULL, NULL, limit,
holds_callback, &cb);
if (ret != 0)
++errors;
}
/*
* 2. print holds data
*/
print_holds(scripted, cb.cb_max_namelen, cb.cb_max_taglen, nvl);
if (nvlist_empty(nvl))
(void) fprintf(stderr, gettext("no datasets available\n"));
nvlist_free(nvl);
return (0 != errors);
}
#define CHECK_SPINNER 30 /* datasets examined between spinner checks */
#define SPINNER_TIME 3 /* seconds */
#define MOUNT_TIME 1 /* seconds */
typedef struct get_all_state {
boolean_t ga_verbose;
get_all_cb_t *ga_cbp;
} get_all_state_t;
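/*
* zfs_iter_root()/zfs_iter_filesystems() callback: update the progress
* spinner, recurse into child filesystems, and add every filesystem handle
* to the callback list (other dataset types are closed and skipped).
*/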
static int
get_one_dataset(zfs_handle_t *zhp, void *data)
{
static char *spin[] = { "-", "\\", "|", "/" };
static int spinval = 0;
static int spincheck = 0;
static time_t last_spin_time = (time_t)0;
get_all_state_t *state = data;
zfs_type_t type = zfs_get_type(zhp);
if (state->ga_verbose) {
if (--spincheck < 0) {
time_t now = time(NULL);
if (last_spin_time + SPINNER_TIME < now) {
update_progress(spin[spinval++ % 4]);
last_spin_time = now;
}
spincheck = CHECK_SPINNER;
}
}
/*
* Iterate over any nested datasets.
*/
if (zfs_iter_filesystems(zhp, get_one_dataset, data) != 0) {
zfs_close(zhp);
return (1);
}
/*
* Skip any datasets whose type does not match.
*/
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(state->ga_cbp, zhp);
assert(state->ga_cbp->cb_used <= state->ga_cbp->cb_alloc);
return (0);
}
static void
get_all_datasets(get_all_cb_t *cbp, boolean_t verbose)
{
get_all_state_t state = {
.ga_verbose = verbose,
.ga_cbp = cbp
};
if (verbose)
set_progress_header(gettext("Reading ZFS config"));
(void) zfs_iter_root(g_zfs, get_one_dataset, &state);
if (verbose)
finish_progress(gettext("done."));
}
/*
* Generic callback for sharing or mounting filesystems. Because the code is so
* similar, we have a common function with an extra parameter to determine which
* mode we are using.
*/
typedef enum { OP_SHARE, OP_MOUNT } share_mount_op_t;
typedef struct share_mount_state {
share_mount_op_t sm_op;
boolean_t sm_verbose;
int sm_flags;
char *sm_options;
char *sm_proto; /* only valid for OP_SHARE */
pthread_mutex_t sm_lock; /* protects the remaining fields */
uint_t sm_total; /* number of filesystems to process */
uint_t sm_done; /* number of filesystems processed */
int sm_status; /* -1 if any of the share/mount operations failed */
} share_mount_state_t;
/*
* Share or mount a dataset.
*/
static int
share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
boolean_t explicit, const char *options)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char smbshareopts[ZFS_MAXPROPLEN];
const char *cmdname = op == OP_SHARE ? "share" : "mount";
struct mnttab mnt;
uint64_t zoned, canmount;
boolean_t shared_nfs, shared_smb;
assert(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM);
/*
* Check to make sure we can mount/share this dataset. If we
* are in the global zone and the filesystem is exported to a
* local zone, or if we are in a local zone and the
* filesystem is not exported, then it is an error.
*/
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned && getzoneid() == GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"dataset is exported to a local zone\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (!zoned && getzoneid() != GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"permission denied\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* Ignore any filesystems which don't apply to us. This
* includes those with a legacy mountpoint, or those with
* legacy share options.
*/
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts,
sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts,
sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0);
if (op == OP_SHARE && strcmp(shareopts, "off") == 0 &&
strcmp(smbshareopts, "off") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share '%s': "
"legacy share\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use exports(5) or "
"smb.conf(5) to share this filesystem, or set "
"the sharenfs or sharesmb property\n"));
return (1);
}
/*
* We cannot share or mount legacy filesystems. If the
* shareopts is non-legacy but the mountpoint is legacy, we
* treat it as a legacy share.
*/
if (strcmp(mountpoint, "legacy") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"legacy mountpoint\n"), cmdname, zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use %s(8) to "
"%s this filesystem\n"), cmdname, cmdname);
return (1);
}
if (strcmp(mountpoint, "none") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': no "
"mountpoint set\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* canmount  explicit  outcome
* on        no        pass through
* on        yes       pass through
* off       no        return 0
* off       yes       display error, return 1
* noauto    no        return 0
* noauto    yes       pass through
*/
canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT);
if (canmount == ZFS_CANMOUNT_OFF) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"'canmount' property is set to 'off'\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) {
/*
* When performing a 'zfs mount -a', we skip any mounts for
* datasets that have 'noauto' set. Sharing a dataset with
* 'noauto' set is only allowed if it's mounted.
*/
if (op == OP_MOUNT)
return (0);
if (op == OP_SHARE && !zfs_is_mounted(zhp, NULL)) {
/* also purge it from existing exports */
zfs_unshareall_bypath(zhp, mountpoint);
return (0);
}
}
/*
* If this filesystem is encrypted and does not have
* a loaded key, we can not mount it.
*/
if ((flags & MS_CRYPT) == 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF &&
zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"encryption key not loaded\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* If this filesystem is inconsistent and has a receive resume
* token, we can not mount it.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Contains partially-completed state from "
"\"zfs receive -s\", which can be resumed with "
"\"zfs send -t\"\n"),
cmdname, zfs_get_name(zhp));
return (1);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Dataset is not complete, was created by receiving "
"a redacted zfs send stream.\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* At this point, we have verified that the mountpoint and/or
* shareopts are appropriate for auto management. If the
* filesystem is already mounted or shared, return (failing
* for explicit requests); otherwise mount or share the
* filesystem.
*/
switch (op) {
case OP_SHARE:
shared_nfs = zfs_is_shared_nfs(zhp, NULL);
shared_smb = zfs_is_shared_smb(zhp, NULL);
if ((shared_nfs && shared_smb) ||
(shared_nfs && strcmp(shareopts, "on") == 0 &&
strcmp(smbshareopts, "off") == 0) ||
(shared_smb && strcmp(smbshareopts, "on") == 0 &&
strcmp(shareopts, "off") == 0)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share "
"'%s': filesystem already shared\n"),
zfs_get_name(zhp));
return (1);
}
if (!zfs_is_mounted(zhp, NULL) &&
zfs_mount(zhp, NULL, flags) != 0)
return (1);
if (protocol == NULL) {
if (zfs_shareall(zhp) != 0)
return (1);
} else if (strcmp(protocol, "nfs") == 0) {
if (zfs_share_nfs(zhp))
return (1);
} else if (strcmp(protocol, "smb") == 0) {
if (zfs_share_smb(zhp))
return (1);
} else {
(void) fprintf(stderr, gettext("cannot share "
"'%s': invalid share type '%s' "
"specified\n"),
zfs_get_name(zhp), protocol);
return (1);
}
break;
case OP_MOUNT:
if (options == NULL)
mnt.mnt_mntopts = "";
else
mnt.mnt_mntopts = (char *)options;
if (!hasmntopt(&mnt, MNTOPT_REMOUNT) &&
zfs_is_mounted(zhp, NULL)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot mount "
"'%s': filesystem already mounted\n"),
zfs_get_name(zhp));
return (1);
}
if (zfs_mount(zhp, options, flags) != 0)
return (1);
break;
}
return (0);
}
/*
* Reports progress in the form "(current/total)". Not thread-safe.
*/
static void
report_mount_progress(int current, int total)
{
static time_t last_progress_time = 0;
time_t now = time(NULL);
char info[32];
/* display header if we're here for the first time */
if (current == 1) {
set_progress_header(gettext("Mounting ZFS filesystems"));
} else if (current != total && last_progress_time + MOUNT_TIME >= now) {
/* too soon to report again */
return;
}
last_progress_time = now;
(void) sprintf(info, "(%d/%d)", current, total);
if (current == total)
finish_progress(info);
else
update_progress(info);
}
/*
* zfs_foreach_mountpoint() callback that mounts or shares one filesystem and
* updates the progress meter.
*/
static int
share_mount_one_cb(zfs_handle_t *zhp, void *arg)
{
share_mount_state_t *sms = arg;
int ret;
ret = share_mount_one(zhp, sms->sm_op, sms->sm_flags, sms->sm_proto,
B_FALSE, sms->sm_options);
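/* Updates to the shared counters and progress meter must be serialized. */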
pthread_mutex_lock(&sms->sm_lock);
if (ret != 0)
sms->sm_status = ret;
sms->sm_done++;
if (sms->sm_verbose)
report_mount_progress(sms->sm_done, sms->sm_total);
pthread_mutex_unlock(&sms->sm_lock);
return (ret);
}
static void
append_options(char *mntopts, char *newopts)
{
int len = strlen(mntopts);
/* original length plus new string to append plus 1 for the comma */
if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) {
(void) fprintf(stderr, gettext("the opts argument for "
"'%s' option is too long (more than %d chars)\n"),
"-o", MNT_LINE_MAX);
usage(B_FALSE);
}
if (*mntopts)
mntopts[len++] = ',';
(void) strcpy(&mntopts[len], newopts);
}
static int
share_mount(int op, int argc, char **argv)
{
int do_all = 0;
boolean_t verbose = B_FALSE;
int c, ret = 0;
char *options = NULL;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":alvo:Of" : "al"))
!= -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'v':
verbose = B_TRUE;
break;
case 'l':
flags |= MS_CRYPT;
break;
case 'o':
if (*optarg == '\0') {
(void) fprintf(stderr, gettext("empty mount "
"options (-o) specified\n"));
usage(B_FALSE);
}
if (options == NULL)
options = safe_malloc(MNT_LINE_MAX + 1);
/* option validation is done later */
append_options(options, optarg);
break;
case 'O':
flags |= MS_OVERLAY;
break;
case 'f':
flags |= MS_FORCE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (do_all) {
char *protocol = NULL;
if (op == OP_SHARE && argc > 0) {
if (strcmp(argv[0], "nfs") != 0 &&
strcmp(argv[0], "smb") != 0) {
(void) fprintf(stderr, gettext("share type "
"must be 'nfs' or 'smb'\n"));
usage(B_FALSE);
}
protocol = argv[0];
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
start_progress_timer();
get_all_cb_t cb = { 0 };
get_all_datasets(&cb, verbose);
if (cb.cb_used == 0) {
free(options);
return (0);
}
share_mount_state_t share_mount_state = { 0 };
share_mount_state.sm_op = op;
share_mount_state.sm_verbose = verbose;
share_mount_state.sm_flags = flags;
share_mount_state.sm_options = options;
share_mount_state.sm_proto = protocol;
share_mount_state.sm_total = cb.cb_used;
pthread_mutex_init(&share_mount_state.sm_lock, NULL);
/*
* libshare isn't mt-safe, so only do the operation in parallel
* if we're mounting. Additionally, the key-loading option must
* be serialized so that we can prompt the user for their keys
* in a consistent manner.
*/
zfs_foreach_mountpoint(g_zfs, cb.cb_handles, cb.cb_used,
share_mount_one_cb, &share_mount_state,
op == OP_MOUNT && !(flags & MS_CRYPT));
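/* Commit any share state changes accumulated while mounting/sharing. */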
zfs_commit_all_shares();
ret = share_mount_state.sm_status;
for (int i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_handles[i]);
free(cb.cb_handles);
} else if (argc == 0) {
FILE *mnttab;
struct mnttab entry;
if ((op == OP_SHARE) || (options != NULL)) {
(void) fprintf(stderr, gettext("missing filesystem "
"argument (specify -a for all)\n"));
usage(B_FALSE);
}
/*
* When mount is given no arguments, go through
* /proc/self/mounts and display any active ZFS mounts.
* We hide any snapshots, since they are controlled
* automatically.
*/
if ((mnttab = fopen(MNTTAB, "re")) == NULL) {
free(options);
return (ENOENT);
}
while (getmntent(mnttab, &entry) == 0) {
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 ||
strchr(entry.mnt_special, '@') != NULL)
continue;
(void) printf("%-30s %s\n", entry.mnt_special,
entry.mnt_mountp);
}
(void) fclose(mnttab);
} else {
zfs_handle_t *zhp;
if (argc > 1) {
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
} else {
ret = share_mount_one(zhp, op, flags, NULL, B_TRUE,
options);
zfs_commit_all_shares();
zfs_close(zhp);
}
}
free(options);
return (ret);
}
/*
* zfs mount -a
* zfs mount filesystem
*
* Mount all filesystems, or mount the given filesystem.
*/
static int
zfs_do_mount(int argc, char **argv)
{
return (share_mount(OP_MOUNT, argc, argv));
}
/*
* zfs share -a [nfs | smb]
* zfs share filesystem
*
* Share all filesystems, or share the given filesystem.
*/
static int
zfs_do_share(int argc, char **argv)
{
return (share_mount(OP_SHARE, argc, argv));
}
typedef struct unshare_unmount_node {
zfs_handle_t *un_zhp;
char *un_mountp;
uu_avl_node_t un_avlnode;
} unshare_unmount_node_t;
/* ARGSUSED */
static int
unshare_unmount_compare(const void *larg, const void *rarg, void *unused)
{
const unshare_unmount_node_t *l = larg;
const unshare_unmount_node_t *r = rarg;
return (strcmp(l->un_mountp, r->un_mountp));
}
/*
* Convenience routine used by zfs_do_umount() and manual_unmount(). Given an
* absolute path, find the entry in /proc/self/mounts, verify that it's a
* ZFS filesystem, and unmount it appropriately.
*/
static int
unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
{
zfs_handle_t *zhp;
int ret = 0;
struct stat64 statbuf;
struct extmnttab entry;
const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount";
ino_t path_inode;
/*
* Search for the given (major,minor) pair in the mount table.
*/
if (getextmntent(path, &entry, &statbuf) != 0) {
if (op == OP_SHARE) {
(void) fprintf(stderr, gettext("cannot %s '%s': not "
"currently mounted\n"), cmdname, path);
return (1);
}
(void) fprintf(stderr, gettext("warning: %s not in"
"/proc/self/mounts\n"), path);
if ((ret = umount2(path, flags)) != 0)
(void) fprintf(stderr, gettext("%s: %s\n"), path,
strerror(errno));
return (ret != 0);
}
path_inode = statbuf.st_ino;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS "
"filesystem\n"), cmdname, path);
return (1);
}
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
ret = 1;
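/*
 * Confirm the given path is the mountpoint itself by comparing its
 * inode against the mount table entry's mountpoint.
 */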
if (stat64(entry.mnt_mountp, &statbuf) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
cmdname, path, strerror(errno));
goto out;
} else if (statbuf.st_ino != path_inode) {
(void) fprintf(stderr, gettext("cannot "
"%s '%s': not a mountpoint\n"), cmdname, path);
goto out;
}
if (op == OP_SHARE) {
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char smbshare_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop,
sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop,
sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(smbshare_prop, "off") == 0) {
(void) fprintf(stderr, gettext("cannot unshare "
"'%s': legacy share\n"), path);
(void) fprintf(stderr, gettext("use exportfs(8) "
"or smbcontrol(1) to unshare this filesystem\n"));
} else if (!zfs_is_shared(zhp)) {
(void) fprintf(stderr, gettext("cannot unshare '%s': "
"not currently shared\n"), path);
} else {
ret = zfs_unshareall_bypath(zhp, path);
zfs_commit_all_shares();
}
} else {
char mtpt_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop,
sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0);
if (is_manual) {
ret = zfs_unmount(zhp, NULL, flags);
} else if (strcmp(mtpt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot unmount "
"'%s': legacy mountpoint\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use umount(8) "
"to unmount this filesystem\n"));
} else {
ret = zfs_unmountall(zhp, flags);
}
}
out:
zfs_close(zhp);
return (ret != 0);
}
/*
* Generic callback for unsharing or unmounting a filesystem.
*/
static int
unshare_unmount(int op, int argc, char **argv)
{
int do_all = 0;
int flags = 0;
int ret = 0;
int c;
zfs_handle_t *zhp;
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char sharesmb[ZFS_MAXPROPLEN];
/* check options */
while ((c = getopt(argc, argv, op == OP_SHARE ? ":a" : "afu")) != -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'f':
flags |= MS_FORCE;
break;
case 'u':
flags |= MS_CRYPT;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (do_all) {
/*
* We could make use of zfs_for_each() to walk all datasets in
* the system, but this would be very inefficient, especially
* since we would have to linearly search /proc/self/mounts for
* each one. Instead, do one pass through /proc/self/mounts
* looking for zfs entries and call zfs_unmount() for each one.
*
* Things get a little tricky if the administrator has created
* mountpoints beneath other ZFS filesystems. In this case, we
* have to unmount the deepest filesystems first. To accomplish
* this, we place all the mountpoints in an AVL tree sorted by
* mountpoint path, and walk the result in reverse so that the
* deepest mountpoints are unmounted first.
*/
FILE *mnttab;
struct mnttab entry;
uu_avl_pool_t *pool;
uu_avl_t *tree = NULL;
unshare_unmount_node_t *node;
uu_avl_index_t idx;
uu_avl_walk_t *walk;
char *protocol = NULL;
if (op == OP_SHARE && argc > 0) {
if (strcmp(argv[0], "nfs") != 0 &&
strcmp(argv[0], "smb") != 0) {
(void) fprintf(stderr, gettext("share type "
"must be 'nfs' or 'smb'\n"));
usage(B_FALSE);
}
protocol = argv[0];
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (((pool = uu_avl_pool_create("unmount_pool",
sizeof (unshare_unmount_node_t),
offsetof(unshare_unmount_node_t, un_avlnode),
unshare_unmount_compare, UU_DEFAULT)) == NULL) ||
((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL))
nomem();
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
while (getmntent(mnttab, &entry) == 0) {
/* ignore non-ZFS entries */
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/* ignore snapshots */
if (strchr(entry.mnt_special, '@') != NULL)
continue;
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
continue;
}
/*
* Ignore datasets that are excluded/restricted by
* parent pool name.
*/
if (zpool_skip_pool(zfs_get_pool_name(zhp))) {
zfs_close(zhp);
continue;
}
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") != 0)
break;
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0)
continue;
break;
case OP_MOUNT:
/* Ignore legacy mounts */
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "legacy") == 0)
continue;
/* Ignore canmount=noauto mounts */
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) ==
ZFS_CANMOUNT_NOAUTO)
continue;
+ break;
default:
break;
}
node = safe_malloc(sizeof (unshare_unmount_node_t));
node->un_zhp = zhp;
node->un_mountp = safe_strdup(entry.mnt_mountp);
uu_avl_node_init(node, &node->un_avlnode, pool);
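/* Skip duplicate mountpoints that can appear in the mount table. */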
if (uu_avl_find(tree, node, NULL, &idx) == NULL) {
uu_avl_insert(tree, node, idx);
} else {
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
}
(void) fclose(mnttab);
/*
* Walk the AVL tree in reverse, unmounting each filesystem and
* removing it from the AVL tree in the process.
*/
if ((walk = uu_avl_walk_start(tree,
UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
const char *mntarg = NULL;
uu_avl_remove(tree, node);
switch (op) {
case OP_SHARE:
if (zfs_unshareall_bytype(node->un_zhp,
node->un_mountp, protocol) != 0)
ret = 1;
break;
case OP_MOUNT:
if (zfs_unmount(node->un_zhp,
mntarg, flags) != 0)
ret = 1;
break;
}
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
if (op == OP_SHARE)
zfs_commit_shares(protocol);
uu_avl_walk_end(walk);
uu_avl_destroy(tree);
uu_avl_pool_destroy(pool);
} else {
if (argc != 1) {
if (argc == 0)
(void) fprintf(stderr,
gettext("missing filesystem argument\n"));
else
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* We have an argument, but it may be a full path or a ZFS
* filesystem. Pass full paths off to unshare_unmount_path() (shared by
* manual_unmount), otherwise open the filesystem and pass to
* zfs_unmount().
*/
if (argv[0][0] == '/')
return (unshare_unmount_path(op, argv[0],
flags, B_FALSE));
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
verify(zfs_prop_get(zhp, op == OP_SHARE ?
ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL,
NULL, 0, B_FALSE) == 0);
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
sharesmb, sizeof (sharesmb), NULL, NULL,
0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(sharesmb, "off") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': legacy share\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"exports(5) or smb.conf(5) to unshare "
"this filesystem\n"));
ret = 1;
} else if (!zfs_is_shared(zhp)) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': not currently "
"shared\n"), zfs_get_name(zhp));
ret = 1;
} else if (zfs_unshareall(zhp) != 0) {
ret = 1;
}
break;
case OP_MOUNT:
if (strcmp(nfs_mnt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': legacy "
"mountpoint\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"umount(8) to unmount this "
"filesystem\n"));
ret = 1;
} else if (!zfs_is_mounted(zhp, NULL)) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': not currently "
"mounted\n"),
zfs_get_name(zhp));
ret = 1;
} else if (zfs_unmountall(zhp, flags) != 0) {
ret = 1;
}
break;
}
zfs_close(zhp);
}
return (ret);
}
/*
* zfs unmount [-fu] -a
* zfs unmount [-fu] filesystem
*
* Unmount all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unmount(int argc, char **argv)
{
return (unshare_unmount(OP_MOUNT, argc, argv));
}
/*
* zfs unshare -a
* zfs unshare filesystem
*
* Unshare all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unshare(int argc, char **argv)
{
return (unshare_unmount(OP_SHARE, argc, argv));
}
static int
find_command_idx(char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
static int
zfs_do_diff(int argc, char **argv)
{
zfs_handle_t *zhp;
int flags = 0;
char *tosnap = NULL;
char *fromsnap = NULL;
char *atp, *copy;
int err = 0;
int c;
struct sigaction sa;
while ((c = getopt(argc, argv, "FHt")) != -1) {
switch (c) {
case 'F':
flags |= ZFS_DIFF_CLASSIFY;
break;
case 'H':
flags |= ZFS_DIFF_PARSEABLE;
break;
case 't':
flags |= ZFS_DIFF_TIMESTAMP;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr,
gettext("must provide at least one snapshot name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
fromsnap = argv[0];
tosnap = (argc == 2) ? argv[1] : NULL;
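/*
 * Determine which argument carries the dataset name; a leading '@'
 * means the snapshot was given in short form and the other argument
 * must supply the dataset.
 */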
copy = NULL;
if (*fromsnap != '@')
copy = strdup(fromsnap);
else if (tosnap)
copy = strdup(tosnap);
if (copy == NULL)
usage(B_FALSE);
if ((atp = strchr(copy, '@')) != NULL)
*atp = '\0';
if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL) {
free(copy);
return (1);
}
free(copy);
/*
* Ignore SIGPIPE so that the library can give us
* information on any failure
*/
if (sigemptyset(&sa.sa_mask) == -1) {
err = errno;
goto out;
}
sa.sa_flags = 0;
sa.sa_handler = SIG_IGN;
if (sigaction(SIGPIPE, &sa, NULL) == -1) {
err = errno;
goto out;
}
err = zfs_show_diffs(zhp, STDOUT_FILENO, fromsnap, tosnap, flags);
out:
zfs_close(zhp);
return (err != 0);
}
/*
* zfs bookmark <fs@source>|<fs#source> <fs#bookmark>
*
* Creates a bookmark with the given name from the source snapshot
* or creates a copy of an existing source bookmark.
*/
static int
zfs_do_bookmark(int argc, char **argv)
{
char *source, *bookname;
char expbuf[ZFS_MAX_DATASET_NAME_LEN];
int source_type;
nvlist_t *nvl;
int ret = 0;
int c;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing bookmark argument\n"));
goto usage;
}
source = argv[0];
bookname = argv[1];
if (strchr(source, '@') == NULL && strchr(source, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid source name '%s': "
"must contain a '@' or '#'\n"), source);
goto usage;
}
if (strchr(bookname, '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid bookmark name '%s': "
"must contain a '#'\n"), bookname);
goto usage;
}
/*
* expand source or bookname to full path:
* one of them may be specified as short name
*/
{
char **expand;
char *source_short, *bookname_short;
source_short = strpbrk(source, "@#");
bookname_short = strpbrk(bookname, "#");
if (source_short == source &&
bookname_short == bookname) {
(void) fprintf(stderr, gettext(
"either source or bookmark must be specified as "
"full dataset paths"));
goto usage;
} else if (source_short != source &&
bookname_short != bookname) {
expand = NULL;
} else if (source_short != source) {
strlcpy(expbuf, source, sizeof (expbuf));
expand = &bookname;
} else if (bookname_short != bookname) {
strlcpy(expbuf, bookname, sizeof (expbuf));
expand = &source;
} else {
abort();
}
if (expand != NULL) {
*strpbrk(expbuf, "@#") = '\0'; /* dataset name in buf */
(void) strlcat(expbuf, *expand, sizeof (expbuf));
*expand = expbuf;
}
}
/* determine source type */
switch (*strpbrk(source, "@#")) {
case '@': source_type = ZFS_TYPE_SNAPSHOT; break;
case '#': source_type = ZFS_TYPE_BOOKMARK; break;
default: abort();
}
/* test the source exists */
zfs_handle_t *zhp;
zhp = zfs_open(g_zfs, source, source_type);
if (zhp == NULL)
goto usage;
zfs_close(zhp);
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, bookname, source);
ret = lzc_bookmark(nvl, NULL);
fnvlist_free(nvl);
if (ret != 0) {
const char *err_msg = NULL;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create bookmark '%s'"), bookname);
switch (ret) {
case EXDEV:
err_msg = "bookmark is in a different pool";
break;
case ZFS_ERR_BOOKMARK_SOURCE_NOT_ANCESTOR:
err_msg = "source is not an ancestor of the "
"new bookmark's dataset";
break;
case EEXIST:
err_msg = "bookmark exists";
break;
case EINVAL:
err_msg = "invalid argument";
break;
case ENOTSUP:
err_msg = "bookmark feature not enabled";
break;
case ENOSPC:
err_msg = "out of space";
break;
case ENOENT:
err_msg = "dataset does not exist";
break;
default:
(void) zfs_standard_error(g_zfs, ret, errbuf);
break;
}
if (err_msg != NULL) {
(void) fprintf(stderr, "%s: %s\n", errbuf,
dgettext(TEXT_DOMAIN, err_msg));
}
}
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
static int
zfs_do_channel_program(int argc, char **argv)
{
int ret, fd, c;
char *progbuf, *filename, *poolname;
size_t progsize, progread;
nvlist_t *outnvl = NULL;
uint64_t instrlimit = ZCP_DEFAULT_INSTRLIMIT;
uint64_t memlimit = ZCP_DEFAULT_MEMLIMIT;
boolean_t sync_flag = B_TRUE, json_output = B_FALSE;
zpool_handle_t *zhp;
/* check options */
while ((c = getopt(argc, argv, "nt:m:j")) != -1) {
switch (c) {
case 't':
case 'm': {
uint64_t arg;
char *endp;
errno = 0;
arg = strtoull(optarg, &endp, 0);
if (errno != 0 || *endp != '\0') {
(void) fprintf(stderr, gettext(
"invalid argument "
"'%s': expected integer\n"), optarg);
goto usage;
}
if (c == 't') {
instrlimit = arg;
} else {
ASSERT3U(c, ==, 'm');
memlimit = arg;
}
break;
}
case 'n': {
sync_flag = B_FALSE;
break;
}
case 'j': {
json_output = B_TRUE;
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
if (argc < 2) {
(void) fprintf(stderr,
gettext("invalid number of arguments\n"));
goto usage;
}
poolname = argv[0];
filename = argv[1];
if (strcmp(filename, "-") == 0) {
fd = 0;
filename = "standard input";
} else if ((fd = open(filename, O_RDONLY)) < 0) {
(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
filename, strerror(errno));
return (1);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("cannot open pool '%s'\n"),
poolname);
if (fd != 0)
(void) close(fd);
return (1);
}
zpool_close(zhp);
/*
* Read in the channel program, expanding the program buffer as
* necessary.
*/
progread = 0;
progsize = 1024;
progbuf = safe_malloc(progsize);
do {
ret = read(fd, progbuf + progread, progsize - progread);
progread += ret;
if (progread == progsize && ret > 0) {
progsize *= 2;
progbuf = safe_realloc(progbuf, progsize);
}
} while (ret > 0);
if (fd != 0)
(void) close(fd);
if (ret < 0) {
free(progbuf);
(void) fprintf(stderr,
gettext("cannot read '%s': %s\n"),
filename, strerror(errno));
return (1);
}
progbuf[progread] = '\0';
/*
* Any remaining arguments are passed as arguments to the lua script as
* a string array:
* {
* "argv" -> [ "arg 1", ... "arg n" ],
* }
*/
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string_array(argnvl, ZCP_ARG_CLIARGV, argv + 2, argc - 2);
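/* Run the program in a syncing context unless -n was given. */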
if (sync_flag) {
ret = lzc_channel_program(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
} else {
ret = lzc_channel_program_nosync(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
}
if (ret != 0) {
/*
* On error, report the error message handed back by lua if one
* exists. Otherwise, generate an appropriate error message,
* falling back on strerror() for an unexpected return code.
*/
char *errstring = NULL;
const char *msg = gettext("Channel program execution failed");
uint64_t instructions = 0;
if (outnvl != NULL && nvlist_exists(outnvl, ZCP_RET_ERROR)) {
(void) nvlist_lookup_string(outnvl,
ZCP_RET_ERROR, &errstring);
if (errstring == NULL)
errstring = strerror(ret);
if (ret == ETIME) {
(void) nvlist_lookup_uint64(outnvl,
ZCP_ARG_INSTRLIMIT, &instructions);
}
} else {
switch (ret) {
case EINVAL:
errstring =
"Invalid instruction or memory limit.";
break;
case ENOMEM:
errstring = "Return value too large.";
break;
case ENOSPC:
errstring = "Memory limit exhausted.";
break;
case ETIME:
errstring = "Timed out.";
break;
case EPERM:
errstring = "Permission denied. Channel "
"programs must be run as root.";
break;
default:
(void) zfs_standard_error(g_zfs, ret, msg);
}
}
if (errstring != NULL)
(void) fprintf(stderr, "%s:\n%s\n", msg, errstring);
if (ret == ETIME && instructions != 0)
(void) fprintf(stderr,
gettext("%llu Lua instructions\n"),
(u_longlong_t)instructions);
} else {
if (json_output) {
(void) nvlist_print_json(stdout, outnvl);
} else if (nvlist_empty(outnvl)) {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and did not produce output.\n"));
} else {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and produced output:\n"));
dump_nvlist(outnvl, 4);
}
}
free(progbuf);
fnvlist_free(outnvl);
fnvlist_free(argnvl);
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
typedef struct loadkey_cbdata {
boolean_t cb_loadkey;
boolean_t cb_recursive;
boolean_t cb_noop;
char *cb_keylocation;
uint64_t cb_numfailed;
uint64_t cb_numattempted;
} loadkey_cbdata_t;
static int
load_key_callback(zfs_handle_t *zhp, void *data)
{
int ret;
boolean_t is_encroot;
loadkey_cbdata_t *cb = data;
uint64_t keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If we are working recursively, we want to skip loading / unloading
* keys for non-encryption roots and datasets whose keys are already
* in the desired end-state.
*/
if (cb->cb_recursive) {
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0)
return (ret);
if (!is_encroot)
return (0);
if ((cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_AVAILABLE) ||
(!cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_UNAVAILABLE))
return (0);
}
cb->cb_numattempted++;
if (cb->cb_loadkey)
ret = zfs_crypto_load_key(zhp, cb->cb_noop, cb->cb_keylocation);
else
ret = zfs_crypto_unload_key(zhp);
if (ret != 0) {
cb->cb_numfailed++;
return (ret);
}
return (0);
}
static int
load_unload_keys(int argc, char **argv, boolean_t loadkey)
{
int c, ret = 0, flags = 0;
boolean_t do_all = B_FALSE;
loadkey_cbdata_t cb = { 0 };
cb.cb_loadkey = loadkey;
while ((c = getopt(argc, argv, "anrL:")) != -1) {
/* noop and alternate keylocations only apply to zfs load-key */
if (loadkey) {
switch (c) {
case 'n':
cb.cb_noop = B_TRUE;
continue;
case 'L':
cb.cb_keylocation = optarg;
continue;
default:
break;
}
}
switch (c) {
case 'a':
do_all = B_TRUE;
cb.cb_recursive = B_TRUE;
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
cb.cb_recursive = B_TRUE;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (!do_all && argc == 0) {
(void) fprintf(stderr,
gettext("Missing dataset argument or -a option\n"));
usage(B_FALSE);
}
if (do_all && argc != 0) {
(void) fprintf(stderr,
gettext("Cannot specify dataset with -a option\n"));
usage(B_FALSE);
}
if (cb.cb_recursive && cb.cb_keylocation != NULL &&
strcmp(cb.cb_keylocation, "prompt") != 0) {
(void) fprintf(stderr, gettext("alternate keylocation may only "
"be 'prompt' with -r or -a\n"));
usage(B_FALSE);
}
ret = zfs_for_each(argc, argv, flags,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, NULL, NULL, 0,
load_key_callback, &cb);
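/*
 * Print a summary for dry runs and for recursive/all operations that
 * attempted at least one key.
 */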
if (cb.cb_noop || (cb.cb_recursive && cb.cb_numattempted != 0)) {
(void) printf(gettext("%llu / %llu key(s) successfully %s\n"),
(u_longlong_t)(cb.cb_numattempted - cb.cb_numfailed),
(u_longlong_t)cb.cb_numattempted,
loadkey ? (cb.cb_noop ? "verified" : "loaded") :
"unloaded");
}
if (cb.cb_numfailed != 0)
ret = -1;
return (ret);
}
static int
zfs_do_load_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_TRUE));
}
static int
zfs_do_unload_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_FALSE));
}
static int
zfs_do_change_key(int argc, char **argv)
{
int c, ret;
uint64_t keystatus;
boolean_t loadkey = B_FALSE, inheritkey = B_FALSE;
zfs_handle_t *zhp = NULL;
nvlist_t *props = fnvlist_alloc();
while ((c = getopt(argc, argv, "lio:")) != -1) {
switch (c) {
case 'l':
loadkey = B_TRUE;
break;
case 'i':
inheritkey = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
if (inheritkey && !nvlist_empty(props)) {
(void) fprintf(stderr,
gettext("Properties not allowed for inheriting\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("Too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[argc - 1],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
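/* With -l, make sure the key is loaded before attempting the rewrap. */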
if (loadkey) {
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus != ZFS_KEYSTATUS_AVAILABLE) {
ret = zfs_crypto_load_key(zhp, B_FALSE, NULL);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
}
/* refresh the properties so the new keystatus is visible */
zfs_refresh_properties(zhp);
}
ret = zfs_crypto_rewrap(zhp, props, inheritkey);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
nvlist_free(props);
zfs_close(zhp);
return (0);
}
/*
* 1) zfs project [-d|-r] <file|directory ...>
* List project ID and inherit flag of file(s) or directories.
* -d: List the directory itself, not its children.
* -r: List subdirectories recursively.
*
* 2) zfs project -C [-k] [-r] <file|directory ...>
* Clear project inherit flag and/or ID on the file(s) or directories.
* -k: Keep the project ID unchanged. If not specified, the project ID
* will be reset to zero.
* -r: Clear on subdirectories recursively.
*
* 3) zfs project -c [-0] [-d|-r] [-p id] <file|directory ...>
* Check project ID and inherit flag on the file(s) or directories,
* report the outliers.
* -0: Print file name followed by a NUL instead of newline.
* -d: Check the directory itself, not its children.
* -p: Specify the reference project ID to compare against the target
* file(s) or directories' project IDs. If not specified, the target
* (top) directory's project ID is used as the reference.
* -r: Check subdirectories recursively.
*
* 4) zfs project [-p id] [-r] [-s] <file|directory ...>
* Set project ID and/or inherit flag on the file(s) or directories.
* -p: Set the project ID as the given id.
* -r: Set on subdirectories recursively. If the "-p" option is not
* specified, the top-level directory's project ID is used as the
* given id, and both the project ID and the inherit flag are set
* on all descendants of the top-level directory.
* -s: Set project inherit flag.
*/
static int
zfs_do_project(int argc, char **argv)
{
zfs_project_control_t zpc = {
.zpc_expected_projid = ZFS_INVALID_PROJID,
.zpc_op = ZFS_PROJECT_OP_DEFAULT,
.zpc_dironly = B_FALSE,
.zpc_keep_projid = B_FALSE,
.zpc_newline = B_TRUE,
.zpc_recursive = B_FALSE,
.zpc_set_flag = B_FALSE,
};
int ret = 0, c;
if (argc < 2)
usage(B_FALSE);
while ((c = getopt(argc, argv, "0Ccdkp:rs")) != -1) {
switch (c) {
case '0':
zpc.zpc_newline = B_FALSE;
break;
case 'C':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CLEAR;
break;
case 'c':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CHECK;
break;
case 'd':
zpc.zpc_dironly = B_TRUE;
/* overwrite "-r" option */
zpc.zpc_recursive = B_FALSE;
break;
case 'k':
zpc.zpc_keep_projid = B_TRUE;
break;
case 'p': {
char *endptr;
errno = 0;
zpc.zpc_expected_projid = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid project ID\n"));
usage(B_FALSE);
}
if (zpc.zpc_expected_projid >= UINT32_MAX) {
(void) fprintf(stderr,
gettext("project ID must be less than "
"%u\n"), UINT32_MAX);
usage(B_FALSE);
}
break;
}
case 'r':
zpc.zpc_recursive = B_TRUE;
/* overwrite "-d" option */
zpc.zpc_dironly = B_FALSE;
break;
case 's':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_set_flag = B_TRUE;
zpc.zpc_op = ZFS_PROJECT_OP_SET;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (zpc.zpc_op == ZFS_PROJECT_OP_DEFAULT) {
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID)
zpc.zpc_op = ZFS_PROJECT_OP_SET;
else
zpc.zpc_op = ZFS_PROJECT_OP_LIST;
}
switch (zpc.zpc_op) {
case ZFS_PROJECT_OP_LIST:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CHECK:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CLEAR:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID) {
(void) fprintf(stderr,
gettext("'-p' is useless together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_SET:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless for set project ID and/or "
"inherit flag\n"));
usage(B_FALSE);
}
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
default:
ASSERT(0);
break;
}
argv += optind;
argc -= optind;
if (argc == 0) {
(void) fprintf(stderr,
gettext("missing file or directory target(s)\n"));
usage(B_FALSE);
}
for (int i = 0; i < argc; i++) {
int err;
err = zfs_project_handle(argv[i], &zpc);
if (err && !ret)
ret = err;
}
return (ret);
}
static int
zfs_do_wait(int argc, char **argv)
{
boolean_t enabled[ZFS_WAIT_NUM_ACTIVITIES];
int error, i;
int c;
/* By default, wait for all types of activity. */
for (i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++)
enabled[i] = B_TRUE;
while ((c = getopt(argc, argv, "t:")) != -1) {
switch (c) {
case 't':
{
static char *col_subopts[] = { "deleteq", NULL };
char *value;
/* Reset activities array */
bzero(&enabled, sizeof (enabled));
while (*optarg != '\0') {
int activity = getsubopt(&optarg, col_subopts,
&value);
if (activity < 0) {
(void) fprintf(stderr,
gettext("invalid activity '%s'\n"),
value);
usage(B_FALSE);
}
enabled[activity] = B_TRUE;
}
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argv += optind;
argc -= optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing 'filesystem' "
"argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zfs_handle_t *zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
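/*
 * Poll each enabled activity; stop once nothing is left to wait for
 * or the filesystem goes away.
 */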
for (;;) {
boolean_t missing = B_FALSE;
boolean_t any_waited = B_FALSE;
for (int i = 0; i < ZFS_WAIT_NUM_ACTIVITIES; i++) {
boolean_t waited;
if (!enabled[i])
continue;
error = zfs_wait_status(zhp, i, &missing, &waited);
if (error != 0 || missing)
break;
any_waited = (any_waited || waited);
}
if (error != 0 || missing || !any_waited)
break;
}
zfs_close(zhp);
return (error);
}
/*
* Display version message
*/
static int
zfs_do_version(int argc, char **argv)
{
if (zfs_version_print() == -1)
return (1);
return (0);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
char **newargv;
(void) setlocale(LC_ALL, "");
(void) setlocale(LC_NUMERIC, "C");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* The 'umount' command is an alias for 'unmount'
*/
if (strcmp(cmdname, "umount") == 0)
cmdname = "unmount";
/*
* The 'recv' command is an alias for 'receive'
*/
if (strcmp(cmdname, "recv") == 0)
cmdname = "receive";
/*
* The 'snap' command is an alias for 'snapshot'
*/
if (strcmp(cmdname, "snap") == 0)
cmdname = "snapshot";
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) ||
(strcmp(cmdname, "--help") == 0))
usage(B_TRUE);
/*
* Special case '-V|--version'
*/
if ((strcmp(cmdname, "-V") == 0) || (strcmp(cmdname, "--version") == 0))
return (zfs_do_version(argc, argv));
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s\n", libzfs_error_init(errno));
return (1);
}
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
libzfs_print_on_error(g_zfs, B_TRUE);
/*
* Many commands modify input strings for string parsing reasons.
* We create a copy to protect the original argv.
*/
newargv = malloc((argc + 1) * sizeof (newargv[0]));
for (i = 0; i < argc; i++)
newargv[i] = strdup(argv[i]);
newargv[argc] = NULL;
/*
* Run the appropriate command.
*/
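/* Cache the mount table so repeated lookups during this command are cheap. */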
libzfs_mnttab_cache(g_zfs, B_TRUE);
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, newargv + 1);
} else if (strchr(cmdname, '=') != NULL) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, newargv);
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
for (i = 0; i < argc; i++)
free(newargv[i]);
free(newargv);
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
#ifdef __FreeBSD__
#include <sys/jail.h>
#include <jail.h>
/*
* Attach/detach the given dataset to/from the given jail
*/
/* ARGSUSED */
static int
zfs_do_jail_impl(int argc, char **argv, boolean_t attach)
{
zfs_handle_t *zhp;
int jailid, ret;
/* check number of arguments */
if (argc < 3) {
(void) fprintf(stderr, gettext("missing argument(s)\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
jailid = jail_getid(argv[1]);
if (jailid < 0) {
(void) fprintf(stderr, gettext("invalid jail id or name\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[2], ZFS_TYPE_FILESYSTEM);
if (zhp == NULL)
return (1);
ret = (zfs_jail(zhp, jailid, attach) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs jail jailid filesystem
*
* Attach the given dataset to the given jail
*/
/* ARGSUSED */
static int
zfs_do_jail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_TRUE));
}
/*
* zfs unjail jailid filesystem
*
* Detach the given dataset from the given jail
*/
/* ARGSUSED */
static int
zfs_do_unjail(int argc, char **argv)
{
return (zfs_do_jail_impl(argc, argv, B_FALSE));
}
#endif
diff --git a/sys/contrib/openzfs/cmd/zpool/zpool.d/media b/sys/contrib/openzfs/cmd/zpool/zpool.d/media
index 05bc15918bc9..5683cdc3c023 100755
--- a/sys/contrib/openzfs/cmd/zpool/zpool.d/media
+++ b/sys/contrib/openzfs/cmd/zpool/zpool.d/media
@@ -1,27 +1,34 @@
#!/bin/sh
#
# Print out the type of device
#
if [ "$1" = "-h" ] ; then
- echo "Show whether a vdev is a file, hdd, or ssd."
+ echo "Show whether a vdev is a file, hdd, ssd, or iscsi."
exit
fi
if [ -b "$VDEV_UPATH" ]; then
device=$(basename "$VDEV_UPATH")
val=$(cat "/sys/block/$device/queue/rotational" 2>/dev/null)
if [ "$val" = "0" ]; then
MEDIA="ssd"
fi
if [ "$val" = "1" ]; then
MEDIA="hdd"
fi
+
+ vpd_pg83="/sys/block/$device/device/vpd_pg83"
+ if [ -f "$vpd_pg83" ]; then
+ if grep -q --binary "iqn." "$vpd_pg83"; then
+ MEDIA="iscsi"
+ fi
+ fi
else
if [ -f "$VDEV_UPATH" ]; then
MEDIA="file"
fi
fi
echo "media=$MEDIA"
diff --git a/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c b/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
index 417d48f3aab1..f326b0420ee8 100644
--- a/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
+++ b/sys/contrib/openzfs/cmd/zpool_influxdb/zpool_influxdb.c
@@ -1,851 +1,851 @@
/*
* Gather top-level ZFS pool and resilver/scan statistics and print using
* influxdb line protocol
* usage: [options] [pool_name]
* where options are:
* --execd, -e run in telegraf execd input plugin mode, [CR] on
* stdin causes a sample to be printed and wait for
* the next [CR]
* --no-histograms, -n don't print histogram data (reduces cardinality
* if you don't care about histograms)
* --sum-histogram-buckets, -s sum histogram bucket values
*
* To integrate into telegraf use one of:
* 1. the `inputs.execd` plugin with the `--execd` option
* 2. the `inputs.exec` plugin to simply run with no options
*
* NOTE: libzfs is an unstable interface. YMMV.
*
* The design goals of this software include:
* + be as lightweight as possible
* + reduce the number of external dependencies as far as possible, hence
* there is no dependency on a client library for managing the metric
* collection -- info is printed, KISS
* + broken pools or kernel bugs can cause this process to hang in an
* unkillable state. For this reason, it is best to keep the damage limited
* to a small process like zpool_influxdb rather than a larger collector.
*
* Copyright 2018-2020 Richard Elling
*
* This software is dual-licensed MIT and CDDL.
*
* The MIT License (MIT)
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License Version 1.0 (CDDL-1.0).
* You can obtain a copy of the license from the top-level file
* "OPENSOLARIS.LICENSE" or at <http://opensource.org/licenses/CDDL-1.0>.
* You may not use this file except in compliance with the license.
*
* See the License for the specific language governing permissions
* and limitations under the License.
*
* CDDL HEADER END
*/
#include <string.h>
#include <getopt.h>
#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <libzfs.h>
#define POOL_MEASUREMENT "zpool_stats"
#define SCAN_MEASUREMENT "zpool_scan_stats"
#define VDEV_MEASUREMENT "zpool_vdev_stats"
#define POOL_LATENCY_MEASUREMENT "zpool_latency"
#define POOL_QUEUE_MEASUREMENT "zpool_vdev_queue"
#define MIN_LAT_INDEX 10 /* minimum latency index 10 = 1024ns */
#define POOL_IO_SIZE_MEASUREMENT "zpool_io_size"
#define MIN_SIZE_INDEX 9 /* minimum size index 9 = 512 bytes */
/* global options */
int execd_mode = 0;
int no_histograms = 0;
int sum_histogram_buckets = 0;
char metric_data_type = 'u';
uint64_t metric_value_mask = UINT64_MAX;
uint64_t timestamp = 0;
int complained_about_sync = 0;
char *tags = "";
typedef int (*stat_printer_f)(nvlist_t *, const char *, const char *);
/*
* influxdb line protocol rules for escaping are important because the
* zpool name can include characters that need to be escaped
*
* caller is responsible for freeing result
*/
static char *
escape_string(const char *s)
{
const char *c;
char *d;
char *t = (char *)malloc(ZFS_MAX_DATASET_NAME_LEN * 2);
if (t == NULL) {
fprintf(stderr, "error: cannot allocate memory\n");
exit(1);
}
for (c = s, d = t; *c != '\0'; c++, d++) {
switch (*c) {
case ' ':
case ',':
case '=':
case '\\':
*d++ = '\\';
- /* FALLTHROUGH */
+ fallthrough;
default:
*d = *c;
}
}
*d = '\0';
return (t);
}
/*
* print key=value where value is a uint64_t
*/
static void
print_kv(char *key, uint64_t value)
{
printf("%s=%llu%c", key,
(u_longlong_t)value & metric_value_mask, metric_data_type);
}
/*
* print_scan_status() prints the details as often seen in the "zpool status"
* output. However, unlike the zpool command, which is intended for humans,
* this output is suitable for long-term tracking in influxdb.
* TODO: update to include issued scan data
*/
static int
print_scan_status(nvlist_t *nvroot, const char *pool_name)
{
uint_t c;
int64_t elapsed;
uint64_t examined, pass_exam, paused_time, paused_ts, rate;
uint64_t remaining_time;
pool_scan_stat_t *ps = NULL;
double pct_done;
char *state[DSS_NUM_STATES] = {
"none", "scanning", "finished", "canceled"};
char *func;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
/*
* ignore if there are no stats
*/
if (ps == NULL)
return (0);
/*
* return error if state is bogus
*/
if (ps->pss_state >= DSS_NUM_STATES ||
ps->pss_func >= POOL_SCAN_FUNCS) {
if (complained_about_sync % 1000 == 0) {
fprintf(stderr, "error: cannot decode scan stats: "
"ZFS is out of sync with compiled zpool_influxdb");
complained_about_sync++;
}
return (1);
}
switch (ps->pss_func) {
case POOL_SCAN_NONE:
func = "none_requested";
break;
case POOL_SCAN_SCRUB:
func = "scrub";
break;
case POOL_SCAN_RESILVER:
func = "resilver";
break;
#ifdef POOL_SCAN_REBUILD
case POOL_SCAN_REBUILD:
func = "rebuild";
break;
#endif
default:
func = "scan";
}
/* overall progress */
examined = ps->pss_examined ? ps->pss_examined : 1;
pct_done = 0.0;
if (ps->pss_to_examine > 0)
pct_done = 100.0 * examined / ps->pss_to_examine;
#ifdef EZFS_SCRUB_PAUSED
paused_ts = ps->pss_pass_scrub_pause;
paused_time = ps->pss_pass_scrub_spent_paused;
#else
paused_ts = 0;
paused_time = 0;
#endif
/* calculations for this pass */
if (ps->pss_state == DSS_SCANNING) {
elapsed = (int64_t)time(NULL) - (int64_t)ps->pss_pass_start -
(int64_t)paused_time;
elapsed = (elapsed > 0) ? elapsed : 1;
pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
rate = pass_exam / elapsed;
rate = (rate > 0) ? rate : 1;
remaining_time = (ps->pss_to_examine - examined) / rate;
} else {
elapsed =
(int64_t)ps->pss_end_time - (int64_t)ps->pss_pass_start -
(int64_t)paused_time;
elapsed = (elapsed > 0) ? elapsed : 1;
pass_exam = ps->pss_pass_exam ? ps->pss_pass_exam : 1;
rate = pass_exam / elapsed;
remaining_time = 0;
}
rate = rate ? rate : 1;
/* influxdb line protocol format: "tags metrics timestamp" */
printf("%s%s,function=%s,name=%s,state=%s ",
SCAN_MEASUREMENT, tags, func, pool_name, state[ps->pss_state]);
print_kv("end_ts", ps->pss_end_time);
print_kv(",errors", ps->pss_errors);
print_kv(",examined", examined);
print_kv(",issued", ps->pss_issued);
print_kv(",pass_examined", pass_exam);
print_kv(",pass_issued", ps->pss_pass_issued);
print_kv(",paused_ts", paused_ts);
print_kv(",paused_t", paused_time);
printf(",pct_done=%.2f", pct_done);
print_kv(",processed", ps->pss_processed);
print_kv(",rate", rate);
print_kv(",remaining_t", remaining_time);
print_kv(",start_ts", ps->pss_start_time);
print_kv(",to_examine", ps->pss_to_examine);
print_kv(",to_process", ps->pss_to_process);
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
/*
* get a vdev name that corresponds to the top-level vdev names
* printed by `zpool status`
*/
static char *
get_vdev_name(nvlist_t *nvroot, const char *parent_name)
{
static char vdev_name[256];
char *vdev_type = NULL;
uint64_t vdev_id = 0;
if (nvlist_lookup_string(nvroot, ZPOOL_CONFIG_TYPE,
&vdev_type) != 0) {
vdev_type = "unknown";
}
if (nvlist_lookup_uint64(
nvroot, ZPOOL_CONFIG_ID, &vdev_id) != 0) {
vdev_id = UINT64_MAX;
}
if (parent_name == NULL) {
(void) snprintf(vdev_name, sizeof (vdev_name), "%s",
vdev_type);
} else {
(void) snprintf(vdev_name, sizeof (vdev_name),
"%s/%s-%llu",
parent_name, vdev_type, (u_longlong_t)vdev_id);
}
return (vdev_name);
}
/*
* get a string suitable for an influxdb tag that describes this vdev
*
* By default only the vdev hierarchical name is shown, separated by '/'
* If the vdev has an associated path, which is typical of leaf vdevs,
* then the path is added.
* It would be nice to have the devid instead of the path, but under
* Linux we cannot be sure a devid will exist and we'd rather have
* something than nothing, so we'll use path instead.
*/
static char *
get_vdev_desc(nvlist_t *nvroot, const char *parent_name)
{
static char vdev_desc[2 * MAXPATHLEN];
char *vdev_type = NULL;
uint64_t vdev_id = 0;
char vdev_value[MAXPATHLEN];
char *vdev_path = NULL;
char *s, *t;
if (nvlist_lookup_string(nvroot, ZPOOL_CONFIG_TYPE, &vdev_type) != 0) {
vdev_type = "unknown";
}
if (nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_ID, &vdev_id) != 0) {
vdev_id = UINT64_MAX;
}
if (nvlist_lookup_string(
nvroot, ZPOOL_CONFIG_PATH, &vdev_path) != 0) {
vdev_path = NULL;
}
if (parent_name == NULL) {
s = escape_string(vdev_type);
(void) snprintf(vdev_value, sizeof (vdev_value), "vdev=%s", s);
free(s);
} else {
s = escape_string((char *)parent_name);
t = escape_string(vdev_type);
(void) snprintf(vdev_value, sizeof (vdev_value),
"vdev=%s/%s-%llu", s, t, (u_longlong_t)vdev_id);
free(s);
free(t);
}
if (vdev_path == NULL) {
(void) snprintf(vdev_desc, sizeof (vdev_desc), "%s",
vdev_value);
} else {
s = escape_string(vdev_path);
(void) snprintf(vdev_desc, sizeof (vdev_desc), "path=%s,%s",
s, vdev_value);
free(s);
}
return (vdev_desc);
}
/*
* vdev summary stats are a combination of the data shown by
* `zpool status` and `zpool list -v`
*/
static int
print_summary_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
uint_t c;
vdev_stat_t *vs;
char *vdev_desc = NULL;
vdev_desc = get_vdev_desc(nvroot, parent_name);
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) != 0) {
return (1);
}
printf("%s%s,name=%s,state=%s,%s ", POOL_MEASUREMENT, tags,
pool_name, zpool_state_to_name((vdev_state_t)vs->vs_state,
(vdev_aux_t)vs->vs_aux), vdev_desc);
print_kv("alloc", vs->vs_alloc);
print_kv(",free", vs->vs_space - vs->vs_alloc);
print_kv(",size", vs->vs_space);
print_kv(",read_bytes", vs->vs_bytes[ZIO_TYPE_READ]);
print_kv(",read_errors", vs->vs_read_errors);
print_kv(",read_ops", vs->vs_ops[ZIO_TYPE_READ]);
print_kv(",write_bytes", vs->vs_bytes[ZIO_TYPE_WRITE]);
print_kv(",write_errors", vs->vs_write_errors);
print_kv(",write_ops", vs->vs_ops[ZIO_TYPE_WRITE]);
print_kv(",checksum_errors", vs->vs_checksum_errors);
print_kv(",fragmentation", vs->vs_fragmentation);
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
/*
* vdev latency stats are histograms stored as nvlist arrays of uint64.
* Latency stats include the ZIO scheduler classes plus lower-level
* vdev latencies.
*
* In many cases, the top-level "root" view obscures the underlying
* top-level vdev operations. For example, if a pool has a log, special,
* or cache device, then each can behave very differently. It is useful
* to see how each is responding.
*/
static int
print_vdev_latency_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
uint_t c, end = 0;
nvlist_t *nv_ex;
char *vdev_desc = NULL;
/* short_names become part of the metric name and are influxdb-ready */
struct lat_lookup {
char *name;
char *short_name;
uint64_t sum;
uint64_t *array;
};
struct lat_lookup lat_type[] = {
{ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO, "total_read", 0},
{ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO, "total_write", 0},
{ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO, "disk_read", 0},
{ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO, "disk_write", 0},
{ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO, "sync_read", 0},
{ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO, "sync_write", 0},
{ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO, "async_read", 0},
{ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO, "async_write", 0},
{ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO, "scrub", 0},
#ifdef ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO
{ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO, "trim", 0},
#endif
{ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO, "rebuild", 0},
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
vdev_desc = get_vdev_desc(nvroot, parent_name);
for (int i = 0; lat_type[i].name; i++) {
if (nvlist_lookup_uint64_array(nv_ex,
lat_type[i].name, &lat_type[i].array, &c) != 0) {
fprintf(stderr, "error: can't get %s\n",
lat_type[i].name);
return (3);
}
/* last bucket index; all of the arrays are the same size */
end = c - 1;
}
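/*
 * Buckets below MIN_LAT_INDEX are folded into the first printed
 * bucket; the final bucket is reported as the +Inf overflow bucket.
 */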
for (int bucket = 0; bucket <= end; bucket++) {
if (bucket < MIN_LAT_INDEX) {
/* don't print, but collect the sum */
for (int i = 0; lat_type[i].name; i++) {
lat_type[i].sum += lat_type[i].array[bucket];
}
continue;
}
if (bucket < end) {
printf("%s%s,le=%0.6f,name=%s,%s ",
POOL_LATENCY_MEASUREMENT, tags,
(float)(1ULL << bucket) * 1e-9,
pool_name, vdev_desc);
} else {
printf("%s%s,le=+Inf,name=%s,%s ",
POOL_LATENCY_MEASUREMENT, tags, pool_name,
vdev_desc);
}
for (int i = 0; lat_type[i].name; i++) {
if (bucket <= MIN_LAT_INDEX || sum_histogram_buckets) {
lat_type[i].sum += lat_type[i].array[bucket];
} else {
lat_type[i].sum = lat_type[i].array[bucket];
}
print_kv(lat_type[i].short_name, lat_type[i].sum);
if (lat_type[i + 1].name != NULL) {
printf(",");
}
}
printf(" %llu\n", (u_longlong_t)timestamp);
}
return (0);
}
/*
* vdev request size stats are histograms stored as nvlist arrays of uint64.
* Request size stats include the ZIO scheduler classes plus lower-level
* vdev sizes. Both independent (ind) and aggregated (agg) sizes are reported.
*
* In many cases, the top-level "root" view obscures the underlying
* top-level vdev operations. For example, if a pool has a log, special,
* or cache device, then each can behave very differently. It is useful
* to see how each is responding.
*/
static int
print_vdev_size_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
uint_t c, end = 0;
nvlist_t *nv_ex;
char *vdev_desc = NULL;
/* short_names become the field name */
struct size_lookup {
char *name;
char *short_name;
uint64_t sum;
uint64_t *array;
};
struct size_lookup size_type[] = {
{ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO, "sync_read_ind"},
{ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO, "sync_write_ind"},
{ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO, "async_read_ind"},
{ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO, "async_write_ind"},
{ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO, "scrub_read_ind"},
{ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO, "sync_read_agg"},
{ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO, "sync_write_agg"},
{ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO, "async_read_agg"},
{ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO, "async_write_agg"},
{ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO, "scrub_read_agg"},
#ifdef ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO
{ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO, "trim_write_ind"},
{ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO, "trim_write_agg"},
#endif
{ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO, "rebuild_write_ind"},
{ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO, "rebuild_write_agg"},
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
vdev_desc = get_vdev_desc(nvroot, parent_name);
for (int i = 0; size_type[i].name; i++) {
if (nvlist_lookup_uint64_array(nv_ex, size_type[i].name,
&size_type[i].array, &c) != 0) {
fprintf(stderr, "error: can't get %s\n",
size_type[i].name);
return (3);
}
/* last bucket index; all of the arrays are the same size */
end = c - 1;
}
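/*
 * Buckets below MIN_SIZE_INDEX are folded into the first printed
 * bucket; the final bucket is reported as the +Inf overflow bucket.
 */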
for (int bucket = 0; bucket <= end; bucket++) {
if (bucket < MIN_SIZE_INDEX) {
/* don't print, but collect the sum */
for (int i = 0; size_type[i].name; i++) {
size_type[i].sum += size_type[i].array[bucket];
}
continue;
}
if (bucket < end) {
printf("%s%s,le=%llu,name=%s,%s ",
POOL_IO_SIZE_MEASUREMENT, tags, 1ULL << bucket,
pool_name, vdev_desc);
} else {
printf("%s%s,le=+Inf,name=%s,%s ",
POOL_IO_SIZE_MEASUREMENT, tags, pool_name,
vdev_desc);
}
for (int i = 0; size_type[i].name; i++) {
if (bucket <= MIN_SIZE_INDEX || sum_histogram_buckets) {
size_type[i].sum += size_type[i].array[bucket];
} else {
size_type[i].sum = size_type[i].array[bucket];
}
print_kv(size_type[i].short_name, size_type[i].sum);
if (size_type[i + 1].name != NULL) {
printf(",");
}
}
printf(" %llu\n", (u_longlong_t)timestamp);
}
return (0);
}
/*
* ZIO scheduler queue stats are stored as gauges. This is unfortunate
* because the values can change very rapidly and any point-in-time
* value will quickly be obsoleted. It is also not easy to downsample.
* Thus only the top-level queue stats might be beneficial... maybe.
*/
static int
print_queue_stats(nvlist_t *nvroot, const char *pool_name,
const char *parent_name)
{
nvlist_t *nv_ex;
uint64_t value;
/* short_names are used for the field name */
struct queue_lookup {
char *name;
char *short_name;
};
struct queue_lookup queue_type[] = {
{ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, "sync_r_active"},
{ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, "sync_w_active"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, "async_r_active"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, "async_w_active"},
{ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, "async_scrub_active"},
{ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, "rebuild_active"},
{ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, "sync_r_pend"},
{ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, "sync_w_pend"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, "async_r_pend"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, "async_w_pend"},
{ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, "async_scrub_pend"},
{ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, "rebuild_pend"},
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
printf("%s%s,name=%s,%s ", POOL_QUEUE_MEASUREMENT, tags, pool_name,
get_vdev_desc(nvroot, parent_name));
for (int i = 0; queue_type[i].name; i++) {
if (nvlist_lookup_uint64(nv_ex,
queue_type[i].name, &value) != 0) {
fprintf(stderr, "error: can't get %s\n",
queue_type[i].name);
return (3);
}
print_kv(queue_type[i].short_name, value);
if (queue_type[i + 1].name != NULL) {
printf(",");
}
}
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
/*
* top-level vdev stats are at the pool level
*/
static int
print_top_level_vdev_stats(nvlist_t *nvroot, const char *pool_name)
{
nvlist_t *nv_ex;
uint64_t value;
/* short_names become part of the metric name */
struct queue_lookup {
char *name;
char *short_name;
};
struct queue_lookup queue_type[] = {
{ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE, "sync_r_active_queue"},
{ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE, "sync_w_active_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE, "async_r_active_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE, "async_w_active_queue"},
{ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE, "async_scrub_active_queue"},
{ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE, "rebuild_active_queue"},
{ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE, "sync_r_pend_queue"},
{ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE, "sync_w_pend_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE, "async_r_pend_queue"},
{ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE, "async_w_pend_queue"},
{ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE, "async_scrub_pend_queue"},
{ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE, "rebuild_pend_queue"},
{NULL, NULL}
};
if (nvlist_lookup_nvlist(nvroot,
ZPOOL_CONFIG_VDEV_STATS_EX, &nv_ex) != 0) {
return (6);
}
printf("%s%s,name=%s,vdev=root ", VDEV_MEASUREMENT, tags,
pool_name);
for (int i = 0; queue_type[i].name; i++) {
if (nvlist_lookup_uint64(nv_ex,
queue_type[i].name, &value) != 0) {
fprintf(stderr, "error: can't get %s\n",
queue_type[i].name);
return (3);
}
if (i > 0)
printf(",");
print_kv(queue_type[i].short_name, value);
}
printf(" %llu\n", (u_longlong_t)timestamp);
return (0);
}
/*
* recursive stats printer
*/
static int
print_recursive_stats(stat_printer_f func, nvlist_t *nvroot,
const char *pool_name, const char *parent_name, int descend)
{
uint_t c, children;
nvlist_t **child;
char vdev_name[256];
int err;
err = func(nvroot, pool_name, parent_name);
if (err)
return (err);
if (descend && nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
(void) strlcpy(vdev_name, get_vdev_name(nvroot, parent_name),
sizeof (vdev_name));
for (c = 0; c < children; c++) {
print_recursive_stats(func, child[c], pool_name,
vdev_name, descend);
}
}
return (0);
}
/*
* call-back to print the stats from the pool config
*
* Note: if the pool is broken, this can hang indefinitely, perhaps in an
* unkillable state.
*/
static int
print_stats(zpool_handle_t *zhp, void *data)
{
uint_t c;
int err;
boolean_t missing;
nvlist_t *config, *nvroot;
vdev_stat_t *vs;
struct timespec tv;
char *pool_name;
/* if not this pool return quickly */
if (data &&
strncmp(data, zpool_get_name(zhp), ZFS_MAX_DATASET_NAME_LEN) != 0) {
zpool_close(zhp);
return (0);
}
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (1);
}
config = zpool_get_config(zhp, NULL);
if (clock_gettime(CLOCK_REALTIME, &tv) != 0)
timestamp = (uint64_t)time(NULL) * 1000000000;
else
timestamp =
((uint64_t)tv.tv_sec * 1000000000) + (uint64_t)tv.tv_nsec;
if (nvlist_lookup_nvlist(
config, ZPOOL_CONFIG_VDEV_TREE, &nvroot) != 0) {
zpool_close(zhp);
return (2);
}
if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) != 0) {
zpool_close(zhp);
return (3);
}
pool_name = escape_string(zpool_get_name(zhp));
err = print_recursive_stats(print_summary_stats, nvroot,
pool_name, NULL, 1);
/* if any of these return an error, skip the rest */
if (err == 0)
err = print_top_level_vdev_stats(nvroot, pool_name);
if (no_histograms == 0) {
if (err == 0)
err = print_recursive_stats(print_vdev_latency_stats, nvroot,
pool_name, NULL, 1);
if (err == 0)
err = print_recursive_stats(print_vdev_size_stats, nvroot,
pool_name, NULL, 1);
if (err == 0)
err = print_recursive_stats(print_queue_stats, nvroot,
pool_name, NULL, 0);
}
if (err == 0)
err = print_scan_status(nvroot, pool_name);
free(pool_name);
zpool_close(zhp);
return (err);
}
static void
usage(char *name)
{
fprintf(stderr, "usage: %s [--execd][--no-histograms]"
"[--sum-histogram-buckets] [--signed-int] [poolname]\n", name);
exit(EXIT_FAILURE);
}
int
main(int argc, char *argv[])
{
int opt;
int ret = 8;
char *line = NULL;
size_t len, tagslen = 0;
struct option long_options[] = {
{"execd", no_argument, NULL, 'e'},
{"help", no_argument, NULL, 'h'},
{"no-histograms", no_argument, NULL, 'n'},
{"signed-int", no_argument, NULL, 'i'},
{"sum-histogram-buckets", no_argument, NULL, 's'},
{"tags", required_argument, NULL, 't'},
{0, 0, 0, 0}
};
while ((opt = getopt_long(
argc, argv, "ehinst:", long_options, NULL)) != -1) {
switch (opt) {
case 'e':
execd_mode = 1;
break;
case 'i':
metric_data_type = 'i';
metric_value_mask = INT64_MAX;
break;
case 'n':
no_histograms = 1;
break;
case 's':
sum_histogram_buckets = 1;
break;
case 't':
tagslen = strlen(optarg) + 2;
tags = calloc(tagslen, 1);
if (tags == NULL) {
fprintf(stderr,
"error: cannot allocate memory "
"for tags\n");
exit(1);
}
(void) snprintf(tags, tagslen, ",%s", optarg);
break;
default:
usage(argv[0]);
}
}
libzfs_handle_t *g_zfs;
if ((g_zfs = libzfs_init()) == NULL) {
fprintf(stderr,
"error: cannot initialize libzfs. "
"Is the zfs module loaded or zrepl running?\n");
exit(EXIT_FAILURE);
}
if (execd_mode == 0) {
ret = zpool_iter(g_zfs, print_stats, argv[optind]);
return (ret);
}
while (getline(&line, &len, stdin) != -1) {
ret = zpool_iter(g_zfs, print_stats, argv[optind]);
fflush(stdout);
}
return (ret);
}
diff --git a/sys/contrib/openzfs/config/Abigail.am b/sys/contrib/openzfs/config/Abigail.am
index 49673a309e3b..94687b90eef2 100644
--- a/sys/contrib/openzfs/config/Abigail.am
+++ b/sys/contrib/openzfs/config/Abigail.am
@@ -1,31 +1,33 @@
#
# When performing an ABI check the following options are applied:
#
# --no-unreferenced-symbols: Exclude symbols which are not referenced by
# any debug information. Without this _init() and _fini() are incorrectly
# reported on CentOS7 for libuutil.so.
#
# --headers-dir1: Limit ABI checks to public OpenZFS headers, otherwise
# changes in public system headers are also reported.
#
# --suppressions: Honor a suppressions file for each library to provide
# a mechanism for suppressing harmless warnings.
#
PHONY += checkabi storeabi
checkabi:
for lib in $(lib_LTLIBRARIES) ; do \
abidiff --no-unreferenced-symbols \
--headers-dir1 ../../include \
--suppressions $${lib%.la}.suppr \
$${lib%.la}.abi .libs/$${lib%.la}.so ; \
done
storeabi:
cd .libs ; \
for lib in $(lib_LTLIBRARIES) ; do \
abidw --no-show-locs \
--no-corpus-path \
+ --no-comp-dir-path \
+ --type-id-style hash \
$${lib%.la}.so > ../$${lib%.la}.abi ; \
done
diff --git a/sys/contrib/openzfs/config/Rules.am b/sys/contrib/openzfs/config/Rules.am
index be80c1e9c7ce..20779ba49259 100644
--- a/sys/contrib/openzfs/config/Rules.am
+++ b/sys/contrib/openzfs/config/Rules.am
@@ -1,67 +1,68 @@
#
# Default build rules for all user space components, every Makefile.am
# should include these rules and override or extend them as needed.
#
PHONY =
DEFAULT_INCLUDES = \
-include $(top_builddir)/zfs_config.h \
-I$(top_builddir)/include \
-I$(top_srcdir)/include \
-I$(top_srcdir)/module/icp/include \
-I$(top_srcdir)/lib/libspl/include
if BUILD_LINUX
DEFAULT_INCLUDES += \
-I$(top_srcdir)/lib/libspl/include/os/linux
endif
if BUILD_FREEBSD
DEFAULT_INCLUDES += \
-I$(top_srcdir)/lib/libspl/include/os/freebsd
endif
AM_LIBTOOLFLAGS = --silent
AM_CFLAGS = -std=gnu99 -Wall -Wstrict-prototypes -Wmissing-prototypes
AM_CFLAGS += -fno-strict-aliasing
AM_CFLAGS += $(NO_OMIT_FRAME_POINTER)
+AM_CFLAGS += $(IMPLICIT_FALLTHROUGH)
AM_CFLAGS += $(DEBUG_CFLAGS)
AM_CFLAGS += $(ASAN_CFLAGS)
AM_CFLAGS += $(CODE_COVERAGE_CFLAGS) $(NO_FORMAT_ZERO_LENGTH)
if BUILD_FREEBSD
AM_CFLAGS += -fPIC -Werror -Wno-unknown-pragmas -Wno-enum-conversion
AM_CFLAGS += -include $(top_srcdir)/include/os/freebsd/spl/sys/ccompile.h
AM_CFLAGS += -I/usr/include -I/usr/local/include
endif
AM_CPPFLAGS = -D_GNU_SOURCE
AM_CPPFLAGS += -D_REENTRANT
AM_CPPFLAGS += -D_FILE_OFFSET_BITS=64
AM_CPPFLAGS += -D_LARGEFILE64_SOURCE
AM_CPPFLAGS += -DLIBEXECDIR=\"$(libexecdir)\"
AM_CPPFLAGS += -DRUNSTATEDIR=\"$(runstatedir)\"
AM_CPPFLAGS += -DSBINDIR=\"$(sbindir)\"
AM_CPPFLAGS += -DSYSCONFDIR=\"$(sysconfdir)\"
AM_CPPFLAGS += -DPKGDATADIR=\"$(pkgdatadir)\"
AM_CPPFLAGS += $(DEBUG_CPPFLAGS)
AM_CPPFLAGS += $(CODE_COVERAGE_CPPFLAGS)
if BUILD_LINUX
AM_CPPFLAGS += -DTEXT_DOMAIN=\"zfs-linux-user\"
endif
if BUILD_FREEBSD
AM_CPPFLAGS += -DTEXT_DOMAIN=\"zfs-freebsd-user\"
endif
AM_CPPFLAGS += -D"strtok(...)=strtok(__VA_ARGS__) __attribute__((deprecated(\"Use strtok_r(3) instead!\")))"
AM_CPPFLAGS += -D"__xpg_basename(...)=__xpg_basename(__VA_ARGS__) __attribute__((deprecated(\"basename(3) is underspecified. Use zfs_basename() instead!\")))"
AM_CPPFLAGS += -D"basename(...)=basename(__VA_ARGS__) __attribute__((deprecated(\"basename(3) is underspecified. Use zfs_basename() instead!\")))"
AM_CPPFLAGS += -D"dirname(...)=dirname(__VA_ARGS__) __attribute__((deprecated(\"dirname(3) is underspecified. Use zfs_dirnamelen() instead!\")))"
AM_LDFLAGS = $(DEBUG_LDFLAGS)
AM_LDFLAGS += $(ASAN_LDFLAGS)
if BUILD_FREEBSD
AM_LDFLAGS += -fstack-protector-strong -shared
AM_LDFLAGS += -Wl,-x -Wl,--fatal-warnings -Wl,--warn-shared-textrel
AM_LDFLAGS += -lm
endif
diff --git a/sys/contrib/openzfs/config/always-compiler-options.m4 b/sys/contrib/openzfs/config/always-compiler-options.m4
index a84123317989..ce84f7e60684 100644
--- a/sys/contrib/openzfs/config/always-compiler-options.m4
+++ b/sys/contrib/openzfs/config/always-compiler-options.m4
@@ -1,204 +1,227 @@
dnl #
dnl # Enabled -fsanitize=address if supported by gcc.
dnl #
dnl # LDFLAGS needs -fsanitize=address at all times so libraries compiled with
dnl # it will be linked successfully. CFLAGS will vary by binary being built.
dnl #
dnl # The ASAN_OPTIONS environment variable can be used to further control
dnl # the behavior of binaries and libraries build with -fsanitize=address.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_ASAN], [
AC_MSG_CHECKING([whether to build with -fsanitize=address support])
AC_ARG_ENABLE([asan],
[AS_HELP_STRING([--enable-asan],
[Enable -fsanitize=address support @<:@default=no@:>@])],
[],
[enable_asan=no])
AM_CONDITIONAL([ASAN_ENABLED], [test x$enable_asan = xyes])
AC_SUBST([ASAN_ENABLED], [$enable_asan])
AC_MSG_RESULT($enable_asan)
AS_IF([ test "$enable_asan" = "yes" ], [
AC_MSG_CHECKING([whether $CC supports -fsanitize=address])
saved_cflags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -fsanitize=address"
AC_LINK_IFELSE([
AC_LANG_SOURCE([[ int main() { return 0; } ]])
], [
ASAN_CFLAGS="-fsanitize=address"
ASAN_LDFLAGS="-fsanitize=address"
ASAN_ZFS="_with_asan"
AC_MSG_RESULT([yes])
], [
AC_MSG_ERROR([$CC does not support -fsanitize=address])
])
CFLAGS="$saved_cflags"
], [
ASAN_CFLAGS=""
ASAN_LDFLAGS=""
ASAN_ZFS="_without_asan"
])
AC_SUBST([ASAN_CFLAGS])
AC_SUBST([ASAN_LDFLAGS])
AC_SUBST([ASAN_ZFS])
])
dnl #
dnl # Check if gcc supports -Wframe-larger-than=<size> option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_FRAME_LARGER_THAN], [
AC_MSG_CHECKING([whether $CC supports -Wframe-larger-than=<size>])
saved_flags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -Wframe-larger-than=4096"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
FRAME_LARGER_THAN="-Wframe-larger-than=4096"
AC_MSG_RESULT([yes])
], [
FRAME_LARGER_THAN=""
AC_MSG_RESULT([no])
])
CFLAGS="$saved_flags"
AC_SUBST([FRAME_LARGER_THAN])
])
dnl #
dnl # Check if gcc supports -Wno-format-truncation option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_TRUNCATION], [
AC_MSG_CHECKING([whether $CC supports -Wno-format-truncation])
saved_flags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -Wno-format-truncation"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
NO_FORMAT_TRUNCATION=-Wno-format-truncation
AC_MSG_RESULT([yes])
], [
NO_FORMAT_TRUNCATION=
AC_MSG_RESULT([no])
])
CFLAGS="$saved_flags"
AC_SUBST([NO_FORMAT_TRUNCATION])
])
dnl #
dnl # Check if gcc supports -Wno-format-truncation option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_ZERO_LENGTH], [
AC_MSG_CHECKING([whether $CC supports -Wno-format-zero-length])
saved_flags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -Wno-format-zero-length"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
NO_FORMAT_ZERO_LENGTH=-Wno-format-zero-length
AC_MSG_RESULT([yes])
], [
NO_FORMAT_ZERO_LENGTH=
AC_MSG_RESULT([no])
])
CFLAGS="$saved_flags"
AC_SUBST([NO_FORMAT_ZERO_LENGTH])
])
dnl #
dnl # Check if gcc supports -Wno-bool-compare option.
dnl #
dnl # We actually invoke gcc with the -Wbool-compare option
dnl # and infer the 'no-' version does or doesn't exist based upon
dnl # the results. This is required because when checking any of
dnl # no- prefixed options gcc always returns success.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_BOOL_COMPARE], [
AC_MSG_CHECKING([whether $CC supports -Wno-bool-compare])
saved_flags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -Wbool-compare"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
NO_BOOL_COMPARE=-Wno-bool-compare
AC_MSG_RESULT([yes])
], [
NO_BOOL_COMPARE=
AC_MSG_RESULT([no])
])
CFLAGS="$saved_flags"
AC_SUBST([NO_BOOL_COMPARE])
])
dnl #
dnl # Check if gcc supports -Wno-unused-but-set-variable option.
dnl #
dnl # We actually invoke gcc with the -Wunused-but-set-variable option
dnl # and infer the 'no-' version does or doesn't exist based upon
dnl # the results. This is required because when checking any of
dnl # no- prefixed options gcc always returns success.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_UNUSED_BUT_SET_VARIABLE], [
AC_MSG_CHECKING([whether $CC supports -Wno-unused-but-set-variable])
saved_flags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -Wunused-but-set-variable"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
NO_UNUSED_BUT_SET_VARIABLE=-Wno-unused-but-set-variable
AC_MSG_RESULT([yes])
], [
NO_UNUSED_BUT_SET_VARIABLE=
AC_MSG_RESULT([no])
])
CFLAGS="$saved_flags"
AC_SUBST([NO_UNUSED_BUT_SET_VARIABLE])
])
+dnl #
+dnl # Check if gcc supports -Wimplicit-fallthrough option.
+dnl #
+AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_IMPLICIT_FALLTHROUGH], [
+ AC_MSG_CHECKING([whether $CC supports -Wimplicit-fallthrough])
+
+ saved_flags="$CFLAGS"
+ CFLAGS="$CFLAGS -Werror -Wimplicit-fallthrough"
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
+ IMPLICIT_FALLTHROUGH=-Wimplicit-fallthrough
+ AC_DEFINE([HAVE_IMPLICIT_FALLTHROUGH], 1,
+ [Define if compiler supports -Wimplicit-fallthrough])
+ AC_MSG_RESULT([yes])
+ ], [
+ IMPLICIT_FALLTHROUGH=
+ AC_MSG_RESULT([no])
+ ])
+
+ CFLAGS="$saved_flags"
+ AC_SUBST([IMPLICIT_FALLTHROUGH])
+])
+
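For context, -Wimplicit-fallthrough turns unannotated fallthrough between switch cases into a warning, which -Werror builds then reject. A minimal, hypothetical C sketch of the annotation pattern this flag encourages (the macro and function names are illustrative only, not OpenZFS code, and assume a compiler that also supports the fallthrough attribute):

#ifdef HAVE_IMPLICIT_FALLTHROUGH
#define	EXAMPLE_FALLTHROUGH	__attribute__((__fallthrough__))
#else
#define	EXAMPLE_FALLTHROUGH	((void)0)
#endif

static int
example_classify(int c)
{
	int weight = 0;

	switch (c) {
	case 'a':
		weight += 1;
		EXAMPLE_FALLTHROUGH;	/* intentional: 'a' also counts as 'b' */
	case 'b':
		weight += 2;
		break;
	default:
		weight = -1;
		break;
	}
	return (weight);
}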
dnl #
dnl # Check if gcc supports -fno-omit-frame-pointer option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_OMIT_FRAME_POINTER], [
AC_MSG_CHECKING([whether $CC supports -fno-omit-frame-pointer])
saved_flags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -fno-omit-frame-pointer"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
NO_OMIT_FRAME_POINTER=-fno-omit-frame-pointer
AC_MSG_RESULT([yes])
], [
NO_OMIT_FRAME_POINTER=
AC_MSG_RESULT([no])
])
CFLAGS="$saved_flags"
AC_SUBST([NO_OMIT_FRAME_POINTER])
])
dnl #
dnl # Check if cc supports -fno-ipa-sra option.
dnl #
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_CC_NO_IPA_SRA], [
AC_MSG_CHECKING([whether $CC supports -fno-ipa-sra])
saved_flags="$CFLAGS"
CFLAGS="$CFLAGS -Werror -fno-ipa-sra"
AC_COMPILE_IFELSE([AC_LANG_PROGRAM([], [])], [
NO_IPA_SRA=-fno-ipa-sra
AC_MSG_RESULT([yes])
], [
NO_IPA_SRA=
AC_MSG_RESULT([no])
])
CFLAGS="$saved_flags"
AC_SUBST([NO_IPA_SRA])
])
diff --git a/sys/contrib/openzfs/config/kernel-acl.m4 b/sys/contrib/openzfs/config/kernel-acl.m4
index c6da4df24eb9..a155b59d006a 100644
--- a/sys/contrib/openzfs/config/kernel-acl.m4
+++ b/sys/contrib/openzfs/config/kernel-acl.m4
@@ -1,310 +1,331 @@
dnl #
dnl # Check if posix_acl_release can be used from a ZFS_META_LICENSED
dnl # module. The is_owner_or_cap macro was replaced by
dnl # inode_owner_or_capable
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_RELEASE], [
ZFS_LINUX_TEST_SRC([posix_acl_release], [
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
], [
struct posix_acl *tmp = posix_acl_alloc(1, 0);
posix_acl_release(tmp);
], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_RELEASE], [
AC_MSG_CHECKING([whether posix_acl_release() is available])
ZFS_LINUX_TEST_RESULT([posix_acl_release], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_RELEASE, 1,
[posix_acl_release() is available])
AC_MSG_CHECKING([whether posix_acl_release() is GPL-only])
ZFS_LINUX_TEST_RESULT([posix_acl_release_license], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_RELEASE_GPL_ONLY, 1,
[posix_acl_release() is GPL-only])
])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 3.14 API change,
dnl # set_cached_acl() and forget_cached_acl() changed from inline to
dnl # EXPORT_SYMBOL. In the former case, they may not be usable because of
dnl # posix_acl_release. In the latter case, we can always use them.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_SET_CACHED_ACL_USABLE], [
ZFS_LINUX_TEST_SRC([set_cached_acl], [
#include <linux/cred.h>
#include <linux/fs.h>
#include <linux/posix_acl.h>
], [
struct inode *ip = NULL;
struct posix_acl *acl = posix_acl_alloc(1, 0);
set_cached_acl(ip, ACL_TYPE_ACCESS, acl);
forget_cached_acl(ip, ACL_TYPE_ACCESS);
], [], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_SET_CACHED_ACL_USABLE], [
AC_MSG_CHECKING([whether set_cached_acl() is usable])
ZFS_LINUX_TEST_RESULT([set_cached_acl_license], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_CACHED_ACL_USABLE, 1,
[set_cached_acl() is usable])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 3.1 API change,
dnl # posix_acl_chmod() was added as the preferred interface.
dnl #
dnl # 3.14 API change,
dnl # posix_acl_chmod() was changed to __posix_acl_chmod()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_CHMOD], [
ZFS_LINUX_TEST_SRC([posix_acl_chmod], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
posix_acl_chmod(NULL, 0, 0)
])
ZFS_LINUX_TEST_SRC([__posix_acl_chmod], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
__posix_acl_chmod(NULL, 0, 0)
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_CHMOD], [
AC_MSG_CHECKING([whether __posix_acl_chmod exists])
ZFS_LINUX_TEST_RESULT([__posix_acl_chmod], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE___POSIX_ACL_CHMOD, 1,
[__posix_acl_chmod() exists])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether posix_acl_chmod exists])
ZFS_LINUX_TEST_RESULT([posix_acl_chmod], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_CHMOD, 1,
[posix_acl_chmod() exists])
],[
ZFS_LINUX_TEST_ERROR([posix_acl_chmod()])
])
])
])
dnl #
dnl # 3.1 API change,
dnl # posix_acl_equiv_mode now wants an umode_t instead of a mode_t
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T], [
ZFS_LINUX_TEST_SRC([posix_acl_equiv_mode], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
umode_t tmp;
posix_acl_equiv_mode(NULL, &tmp);
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T], [
AC_MSG_CHECKING([whether posix_acl_equiv_mode() wants umode_t])
ZFS_LINUX_TEST_RESULT([posix_acl_equiv_mode], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([posix_acl_equiv_mode()])
])
])
dnl #
dnl # 4.8 API change,
dnl # The function posix_acl_valid now must be passed a namespace.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_POSIX_ACL_VALID_WITH_NS], [
ZFS_LINUX_TEST_SRC([posix_acl_valid_with_ns], [
#include <linux/fs.h>
#include <linux/posix_acl.h>
],[
struct user_namespace *user_ns = NULL;
const struct posix_acl *acl = NULL;
int error;
error = posix_acl_valid(user_ns, acl);
])
])
AC_DEFUN([ZFS_AC_KERNEL_POSIX_ACL_VALID_WITH_NS], [
AC_MSG_CHECKING([whether posix_acl_valid() wants user namespace])
ZFS_LINUX_TEST_RESULT([posix_acl_valid_with_ns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_POSIX_ACL_VALID_WITH_NS, 1,
[posix_acl_valid() wants user namespace])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 3.1 API change,
dnl # Check if inode_operations contains the function get_acl
dnl #
+dnl # 5.15 API change,
+dnl # Added the bool rcu argument to get_acl for rcu path walk.
+dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_GET_ACL], [
ZFS_LINUX_TEST_SRC([inode_operations_get_acl], [
#include <linux/fs.h>
struct posix_acl *get_acl_fn(struct inode *inode, int type)
{ return NULL; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.get_acl = get_acl_fn,
};
],[])
+
+ ZFS_LINUX_TEST_SRC([inode_operations_get_acl_rcu], [
+ #include <linux/fs.h>
+
+ struct posix_acl *get_acl_fn(struct inode *inode, int type,
+ bool rcu) { return NULL; }
+
+ static const struct inode_operations
+ iops __attribute__ ((unused)) = {
+ .get_acl = get_acl_fn,
+ };
+ ],[])
])
AC_DEFUN([ZFS_AC_KERNEL_INODE_OPERATIONS_GET_ACL], [
AC_MSG_CHECKING([whether iops->get_acl() exists])
ZFS_LINUX_TEST_RESULT([inode_operations_get_acl], [
AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_GET_ACL, 1, [iops->get_acl() exists])
],[
- ZFS_LINUX_TEST_ERROR([iops->get_acl()])
+ ZFS_LINUX_TEST_RESULT([inode_operations_get_acl_rcu], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_GET_ACL_RCU, 1, [iops->get_acl() takes rcu])
+ ],[
+ ZFS_LINUX_TEST_ERROR([iops->get_acl()])
+ ])
])
])
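As a rough, hypothetical sketch of how the HAVE_GET_ACL / HAVE_GET_ACL_RCU results above are typically consumed (the names below are illustrative, not the actual OpenZFS callbacks; under the 5.15 signature the usual kernel convention is to return ERR_PTR(-ECHILD) during an RCU path walk so the VFS retries in ref-walk mode):

#include <linux/fs.h>
#include <linux/posix_acl.h>
#include <linux/err.h>

#if defined(HAVE_GET_ACL_RCU)
static struct posix_acl *
example_get_acl(struct inode *ip, int type, bool rcu)
{
	if (rcu)
		return (ERR_PTR(-ECHILD));	/* ask the VFS to retry in ref-walk mode */

	return (NULL);	/* real code would read or build the ACL here */
}
#elif defined(HAVE_GET_ACL)
static struct posix_acl *
example_get_acl(struct inode *ip, int type)
{
	return (NULL);	/* real code would read or build the ACL here */
}
#endif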
dnl #
dnl # 3.14 API change,
dnl # Check if inode_operations contains the function set_acl
dnl #
dnl # 5.12 API change,
dnl # set_acl() added a user_namespace* parameter first
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_SET_ACL], [
ZFS_LINUX_TEST_SRC([inode_operations_set_acl_userns], [
#include <linux/fs.h>
int set_acl_fn(struct user_namespace *userns,
struct inode *inode, struct posix_acl *acl,
int type) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.set_acl = set_acl_fn,
};
],[])
ZFS_LINUX_TEST_SRC([inode_operations_set_acl], [
#include <linux/fs.h>
int set_acl_fn(struct inode *inode, struct posix_acl *acl,
int type) { return 0; }
static const struct inode_operations
iops __attribute__ ((unused)) = {
.set_acl = set_acl_fn,
};
],[])
])
AC_DEFUN([ZFS_AC_KERNEL_INODE_OPERATIONS_SET_ACL], [
AC_MSG_CHECKING([whether iops->set_acl() exists])
ZFS_LINUX_TEST_RESULT([inode_operations_set_acl_userns], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists])
AC_DEFINE(HAVE_SET_ACL_USERNS, 1, [iops->set_acl() takes 4 args])
],[
ZFS_LINUX_TEST_RESULT([inode_operations_set_acl], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_SET_ACL, 1, [iops->set_acl() exists, takes 3 args])
],[
AC_MSG_RESULT(no)
])
])
])
dnl #
dnl # 4.7 API change,
dnl # The kernel's get_acl() now checks the cache before calling i_op->get_acl
dnl # and calls set_cached_acl() afterwards, so i_op->get_acl no longer needs
dnl # to do that itself.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_GET_ACL_HANDLE_CACHE], [
ZFS_LINUX_TEST_SRC([get_acl_handle_cache], [
#include <linux/fs.h>
],[
void *sentinel __attribute__ ((unused)) =
uncached_acl_sentinel(NULL);
])
])
AC_DEFUN([ZFS_AC_KERNEL_GET_ACL_HANDLE_CACHE], [
AC_MSG_CHECKING([whether uncached_acl_sentinel() exists])
ZFS_LINUX_TEST_RESULT([get_acl_handle_cache], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_KERNEL_GET_ACL_HANDLE_CACHE, 1,
[uncached_acl_sentinel() exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 4.16 kernel: check if struct posix_acl acl.a_refcount is a refcount_t.
dnl # It's an atomic_t on older kernels.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_ACL_HAS_REFCOUNT], [
ZFS_LINUX_TEST_SRC([acl_refcount], [
#include <linux/backing-dev.h>
#include <linux/refcount.h>
#include <linux/posix_acl.h>
],[
struct posix_acl acl;
refcount_t *r __attribute__ ((unused)) = &acl.a_refcount;
])
])
AC_DEFUN([ZFS_AC_KERNEL_ACL_HAS_REFCOUNT], [
AC_MSG_CHECKING([whether posix_acl has refcount_t])
ZFS_LINUX_TEST_RESULT([acl_refcount], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_ACL_REFCOUNT, 1, [posix_acl has refcount_t])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_ACL], [
ZFS_AC_KERNEL_SRC_POSIX_ACL_RELEASE
ZFS_AC_KERNEL_SRC_SET_CACHED_ACL_USABLE
ZFS_AC_KERNEL_SRC_POSIX_ACL_CHMOD
ZFS_AC_KERNEL_SRC_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T
ZFS_AC_KERNEL_SRC_POSIX_ACL_VALID_WITH_NS
ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_GET_ACL
ZFS_AC_KERNEL_SRC_INODE_OPERATIONS_SET_ACL
ZFS_AC_KERNEL_SRC_GET_ACL_HANDLE_CACHE
ZFS_AC_KERNEL_SRC_ACL_HAS_REFCOUNT
])
AC_DEFUN([ZFS_AC_KERNEL_ACL], [
ZFS_AC_KERNEL_POSIX_ACL_RELEASE
ZFS_AC_KERNEL_SET_CACHED_ACL_USABLE
ZFS_AC_KERNEL_POSIX_ACL_CHMOD
ZFS_AC_KERNEL_POSIX_ACL_EQUIV_MODE_WANTS_UMODE_T
ZFS_AC_KERNEL_POSIX_ACL_VALID_WITH_NS
ZFS_AC_KERNEL_INODE_OPERATIONS_GET_ACL
ZFS_AC_KERNEL_INODE_OPERATIONS_SET_ACL
ZFS_AC_KERNEL_GET_ACL_HANDLE_CACHE
ZFS_AC_KERNEL_ACL_HAS_REFCOUNT
])
diff --git a/sys/contrib/openzfs/config/kernel-blk-queue.m4 b/sys/contrib/openzfs/config/kernel-blk-queue.m4
index 1dced82ce686..ff5d2d370e98 100644
--- a/sys/contrib/openzfs/config/kernel-blk-queue.m4
+++ b/sys/contrib/openzfs/config/kernel-blk-queue.m4
@@ -1,302 +1,342 @@
dnl #
dnl # 2.6.39 API change,
dnl # blk_start_plug() and blk_finish_plug()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_PLUG], [
ZFS_LINUX_TEST_SRC([blk_plug], [
#include <linux/blkdev.h>
],[
struct blk_plug plug __attribute__ ((unused));
blk_start_plug(&plug);
blk_finish_plug(&plug);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_PLUG], [
AC_MSG_CHECKING([whether struct blk_plug is available])
ZFS_LINUX_TEST_RESULT([blk_plug], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([blk_plug])
])
])
dnl #
dnl # 2.6.32 - 4.11: statically allocated bdi in request_queue
dnl # 4.12: dynamically allocated bdi in request_queue
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_BDI], [
ZFS_LINUX_TEST_SRC([blk_queue_bdi], [
#include <linux/blkdev.h>
],[
struct request_queue q;
struct backing_dev_info bdi;
q.backing_dev_info = &bdi;
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_BDI], [
AC_MSG_CHECKING([whether blk_queue bdi is dynamic])
ZFS_LINUX_TEST_RESULT([blk_queue_bdi], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_BDI_DYNAMIC, 1,
[blk queue backing_dev_info is dynamic])
],[
AC_MSG_RESULT(no)
])
])
+dnl #
+dnl # 5.9: added blk_queue_update_readahead(),
+dnl # 5.15: renamed to disk_update_readahead()
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_UPDATE_READAHEAD], [
+ ZFS_LINUX_TEST_SRC([blk_queue_update_readahead], [
+ #include <linux/blkdev.h>
+ ],[
+ struct request_queue q;
+ blk_queue_update_readahead(&q);
+ ])
+
+ ZFS_LINUX_TEST_SRC([disk_update_readahead], [
+ #include <linux/blkdev.h>
+ ],[
+ struct gendisk disk;
+ disk_update_readahead(&disk);
+ ])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_UPDATE_READAHEAD], [
+ AC_MSG_CHECKING([whether blk_queue_update_readahead() exists])
+ ZFS_LINUX_TEST_RESULT([blk_queue_update_readahead], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_BLK_QUEUE_UPDATE_READAHEAD, 1,
+ [blk_queue_update_readahead() exists])
+ ],[
+ AC_MSG_CHECKING([whether disk_update_readahead() exists])
+ ZFS_LINUX_TEST_RESULT([disk_update_readahead], [
+ AC_MSG_RESULT(yes)
+ AC_DEFINE(HAVE_DISK_UPDATE_READAHEAD, 1,
+ [disk_update_readahead() exists])
+ ],[
+ AC_MSG_RESULT(no)
+ ])
+ ])
+])
+
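A hypothetical compatibility wrapper built on the two defines above might look like the following sketch (the helper name is illustrative only; on kernels older than 5.9 there is simply no equivalent call to make):

#include <linux/blkdev.h>

static inline void
example_update_readahead(struct gendisk *disk)
{
#if defined(HAVE_DISK_UPDATE_READAHEAD)
	disk_update_readahead(disk);
#elif defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD)
	blk_queue_update_readahead(disk->queue);
#endif
}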
dnl #
dnl # 2.6.32 API,
dnl # blk_queue_discard()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISCARD], [
ZFS_LINUX_TEST_SRC([blk_queue_discard], [
#include <linux/blkdev.h>
],[
struct request_queue *q __attribute__ ((unused)) = NULL;
int value __attribute__ ((unused));
value = blk_queue_discard(q);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_DISCARD], [
AC_MSG_CHECKING([whether blk_queue_discard() is available])
ZFS_LINUX_TEST_RESULT([blk_queue_discard], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([blk_queue_discard])
])
])
dnl #
dnl # 4.8 API,
dnl # blk_queue_secure_erase()
dnl #
dnl # 2.6.36 - 4.7 API,
dnl # blk_queue_secdiscard()
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE], [
ZFS_LINUX_TEST_SRC([blk_queue_secure_erase], [
#include <linux/blkdev.h>
],[
struct request_queue *q __attribute__ ((unused)) = NULL;
int value __attribute__ ((unused));
value = blk_queue_secure_erase(q);
])
ZFS_LINUX_TEST_SRC([blk_queue_secdiscard], [
#include <linux/blkdev.h>
],[
struct request_queue *q __attribute__ ((unused)) = NULL;
int value __attribute__ ((unused));
value = blk_queue_secdiscard(q);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE], [
AC_MSG_CHECKING([whether blk_queue_secure_erase() is available])
ZFS_LINUX_TEST_RESULT([blk_queue_secure_erase], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_SECURE_ERASE, 1,
[blk_queue_secure_erase() is available])
],[
AC_MSG_RESULT(no)
AC_MSG_CHECKING([whether blk_queue_secdiscard() is available])
ZFS_LINUX_TEST_RESULT([blk_queue_secdiscard], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_SECDISCARD, 1,
[blk_queue_secdiscard() is available])
],[
ZFS_LINUX_TEST_ERROR([blk_queue_secure_erase])
])
])
])
dnl #
dnl # 4.16 API change,
dnl # Introduction of blk_queue_flag_set and blk_queue_flag_clear
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_SET], [
ZFS_LINUX_TEST_SRC([blk_queue_flag_set], [
#include <linux/kernel.h>
#include <linux/blkdev.h>
],[
struct request_queue *q = NULL;
blk_queue_flag_set(0, q);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET], [
AC_MSG_CHECKING([whether blk_queue_flag_set() exists])
ZFS_LINUX_TEST_RESULT([blk_queue_flag_set], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_FLAG_SET, 1,
[blk_queue_flag_set() exists])
],[
AC_MSG_RESULT(no)
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_CLEAR], [
ZFS_LINUX_TEST_SRC([blk_queue_flag_clear], [
#include <linux/kernel.h>
#include <linux/blkdev.h>
],[
struct request_queue *q = NULL;
blk_queue_flag_clear(0, q);
])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR], [
AC_MSG_CHECKING([whether blk_queue_flag_clear() exists])
ZFS_LINUX_TEST_RESULT([blk_queue_flag_clear], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_FLAG_CLEAR, 1,
[blk_queue_flag_clear() exists])
],[
AC_MSG_RESULT(no)
])
])
dnl #
dnl # 2.6.36 API change,
dnl # Added the blk_queue_flush() interface; while the previous interface
dnl # was available to all modules, the new one is GPL-only. Thus, in addition
dnl # to detecting whether this function is available, we determine whether it
dnl # is GPL-only. If the GPL-only interface is present we implement our own
dnl # compatibility function, otherwise we use the function directly. The hope
dnl # is that long term this function will be opened up.
dnl #
dnl # 4.7 API change,
dnl # Replace blk_queue_flush with blk_queue_write_cache
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH], [
ZFS_LINUX_TEST_SRC([blk_queue_flush], [
#include <linux/blkdev.h>
], [
struct request_queue *q = NULL;
(void) blk_queue_flush(q, REQ_FLUSH);
], [$NO_UNUSED_BUT_SET_VARIABLE], [ZFS_META_LICENSE])
ZFS_LINUX_TEST_SRC([blk_queue_write_cache], [
#include <linux/kernel.h>
#include <linux/blkdev.h>
], [
struct request_queue *q = NULL;
blk_queue_write_cache(q, true, true);
], [$NO_UNUSED_BUT_SET_VARIABLE], [ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_FLUSH], [
AC_MSG_CHECKING([whether blk_queue_flush() is available])
ZFS_LINUX_TEST_RESULT([blk_queue_flush], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_FLUSH, 1,
[blk_queue_flush() is available])
AC_MSG_CHECKING([whether blk_queue_flush() is GPL-only])
ZFS_LINUX_TEST_RESULT([blk_queue_flush_license], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY, 1,
[blk_queue_flush() is GPL-only])
])
],[
AC_MSG_RESULT(no)
])
dnl #
dnl # 4.7 API change
dnl # Replace blk_queue_flush with blk_queue_write_cache
dnl #
AC_MSG_CHECKING([whether blk_queue_write_cache() exists])
ZFS_LINUX_TEST_RESULT([blk_queue_write_cache], [
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_WRITE_CACHE, 1,
[blk_queue_write_cache() exists])
AC_MSG_CHECKING([whether blk_queue_write_cache() is GPL-only])
ZFS_LINUX_TEST_RESULT([blk_queue_write_cache_license], [
AC_MSG_RESULT(no)
],[
AC_MSG_RESULT(yes)
AC_DEFINE(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY, 1,
[blk_queue_write_cache() is GPL-only])
])
],[
AC_MSG_RESULT(no)
])
])
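Purely as an illustration of how the HAVE_BLK_QUEUE_FLUSH / HAVE_BLK_QUEUE_WRITE_CACHE results are typically consumed, a hedged sketch follows; it is not the actual OpenZFS wrapper, and it deliberately omits the GPL-only fallback path described in the comment above:

#include <linux/blkdev.h>

static inline void
example_enable_write_cache(struct request_queue *q)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE) && \
	!defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
	blk_queue_write_cache(q, true, true);	/* 4.7+ interface */
#elif defined(HAVE_BLK_QUEUE_FLUSH) && \
	!defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
	blk_queue_flush(q, REQ_FLUSH | REQ_FUA);	/* 2.6.36 - 4.6 interface */
#endif
}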
dnl #
dnl # 2.6.34 API change
dnl # blk_queue_max_hw_sectors() replaces blk_queue_max_sectors().
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS], [
ZFS_LINUX_TEST_SRC([blk_queue_max_hw_sectors], [
#include <linux/blkdev.h>
], [
struct request_queue *q = NULL;
(void) blk_queue_max_hw_sectors(q, BLK_SAFE_MAX_SECTORS);
], [$NO_UNUSED_BUT_SET_VARIABLE])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS], [
AC_MSG_CHECKING([whether blk_queue_max_hw_sectors() is available])
ZFS_LINUX_TEST_RESULT([blk_queue_max_hw_sectors], [
AC_MSG_RESULT(yes)
],[
ZFS_LINUX_TEST_ERROR([blk_queue_max_hw_sectors])
])
])
dnl #
dnl # 2.6.34 API change
dnl # blk_queue_max_segments() consolidates blk_queue_max_hw_segments()
dnl # and blk_queue_max_phys_segments().
dnl #
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS], [
ZFS_LINUX_TEST_SRC([blk_queue_max_segments], [
#include <linux/blkdev.h>
], [
struct request_queue *q = NULL;
(void) blk_queue_max_segments(q, BLK_MAX_SEGMENTS);
], [$NO_UNUSED_BUT_SET_VARIABLE])
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS], [
AC_MSG_CHECKING([whether blk_queue_max_segments() is available])
ZFS_LINUX_TEST_RESULT([blk_queue_max_segments], [
AC_MSG_RESULT(yes)
], [
ZFS_LINUX_TEST_ERROR([blk_queue_max_segments])
])
])
AC_DEFUN([ZFS_AC_KERNEL_SRC_BLK_QUEUE], [
ZFS_AC_KERNEL_SRC_BLK_QUEUE_PLUG
ZFS_AC_KERNEL_SRC_BLK_QUEUE_BDI
+ ZFS_AC_KERNEL_SRC_BLK_QUEUE_UPDATE_READAHEAD
ZFS_AC_KERNEL_SRC_BLK_QUEUE_DISCARD
ZFS_AC_KERNEL_SRC_BLK_QUEUE_SECURE_ERASE
ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_SET
ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLAG_CLEAR
ZFS_AC_KERNEL_SRC_BLK_QUEUE_FLUSH
ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_HW_SECTORS
ZFS_AC_KERNEL_SRC_BLK_QUEUE_MAX_SEGMENTS
])
AC_DEFUN([ZFS_AC_KERNEL_BLK_QUEUE], [
ZFS_AC_KERNEL_BLK_QUEUE_PLUG
ZFS_AC_KERNEL_BLK_QUEUE_BDI
+ ZFS_AC_KERNEL_BLK_QUEUE_UPDATE_READAHEAD
ZFS_AC_KERNEL_BLK_QUEUE_DISCARD
ZFS_AC_KERNEL_BLK_QUEUE_SECURE_ERASE
ZFS_AC_KERNEL_BLK_QUEUE_FLAG_SET
ZFS_AC_KERNEL_BLK_QUEUE_FLAG_CLEAR
ZFS_AC_KERNEL_BLK_QUEUE_FLUSH
ZFS_AC_KERNEL_BLK_QUEUE_MAX_HW_SECTORS
ZFS_AC_KERNEL_BLK_QUEUE_MAX_SEGMENTS
])
diff --git a/sys/contrib/openzfs/config/kernel-stdarg.m4 b/sys/contrib/openzfs/config/kernel-stdarg.m4
new file mode 100644
index 000000000000..5bc8dd859d6b
--- /dev/null
+++ b/sys/contrib/openzfs/config/kernel-stdarg.m4
@@ -0,0 +1,32 @@
+dnl #
+dnl # Linux 5.15 gets rid of -isystem and external <stdarg.h> inclusion
+dnl # and ships its own <linux/stdarg.h>. Check whether this header file
+dnl # exists and provides all the necessary definitions for variadic
+dnl # functions, and adjust the inclusion of <stdarg.h> accordingly.
+dnl #
+AC_DEFUN([ZFS_AC_KERNEL_SRC_STANDALONE_LINUX_STDARG], [
+ ZFS_LINUX_TEST_SRC([has_standalone_linux_stdarg], [
+ #include <linux/stdarg.h>
+
+ #if !defined(va_start) || !defined(va_end) || \
+ !defined(va_arg) || !defined(va_copy)
+ #error "<linux/stdarg.h> is invalid"
+ #endif
+ ],[])
+])
+
+AC_DEFUN([ZFS_AC_KERNEL_STANDALONE_LINUX_STDARG], [
+ dnl #
+	dnl # Linux 5.15 ships its own stdarg.h and does not allow
+	dnl # including the compiler's headers.
+ dnl #
+ AC_MSG_CHECKING([whether standalone <linux/stdarg.h> exists])
+ ZFS_LINUX_TEST_RESULT([has_standalone_linux_stdarg], [
+ AC_MSG_RESULT([yes])
+ AC_DEFINE(HAVE_STANDALONE_LINUX_STDARG, 1,
+ [standalone <linux/stdarg.h> exists])
+ ],[
+ AC_MSG_RESULT([no])
+ ])
+])
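Code consuming HAVE_STANDALONE_LINUX_STDARG then typically selects the header at compile time. A minimal sketch (the guarded include is the whole point; the logging helper merely exercises va_list):

#ifdef HAVE_STANDALONE_LINUX_STDARG
#include <linux/stdarg.h>
#else
#include <stdarg.h>
#endif
#include <linux/printk.h>

static int
example_vlog(const char *fmt, ...)
{
	va_list ap;
	int n;

	va_start(ap, fmt);
	n = vprintk(fmt, ap);
	va_end(ap);
	return (n);
}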
diff --git a/sys/contrib/openzfs/config/kernel.m4 b/sys/contrib/openzfs/config/kernel.m4
index 5ea2286dbcc3..0b94f3bd9cb6 100644
--- a/sys/contrib/openzfs/config/kernel.m4
+++ b/sys/contrib/openzfs/config/kernel.m4
@@ -1,889 +1,891 @@
dnl #
dnl # Default ZFS kernel configuration
dnl #
AC_DEFUN([ZFS_AC_CONFIG_KERNEL], [
AM_COND_IF([BUILD_LINUX], [
dnl # Setup the kernel build environment.
ZFS_AC_KERNEL
ZFS_AC_QAT
dnl # Sanity checks for module building and CONFIG_* defines
ZFS_AC_KERNEL_TEST_MODULE
ZFS_AC_KERNEL_CONFIG_DEFINED
dnl # Sequential ZFS_LINUX_TRY_COMPILE tests
ZFS_AC_KERNEL_FPU_HEADER
ZFS_AC_KERNEL_OBJTOOL_HEADER
ZFS_AC_KERNEL_WAIT_QUEUE_ENTRY_T
ZFS_AC_KERNEL_MISC_MINOR
ZFS_AC_KERNEL_DECLARE_EVENT_CLASS
dnl # Parallel ZFS_LINUX_TEST_SRC / ZFS_LINUX_TEST_RESULT tests
ZFS_AC_KERNEL_TEST_SRC
ZFS_AC_KERNEL_TEST_RESULT
AS_IF([test "$LINUX_OBJ" != "$LINUX"], [
KERNEL_MAKE="$KERNEL_MAKE O=$LINUX_OBJ"
])
AC_SUBST(KERNEL_MAKE)
])
])
dnl #
dnl # Generate and compile all of the kernel API test cases to determine
dnl # which interfaces are available. By invoking the kernel build system
dnl # only once the compilation can be done in parallel significantly
dnl # speeding up the process.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_SRC], [
ZFS_AC_KERNEL_SRC_OBJTOOL
ZFS_AC_KERNEL_SRC_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_SRC_ACCESS_OK_TYPE
ZFS_AC_KERNEL_SRC_PDE_DATA
ZFS_AC_KERNEL_SRC_FALLOCATE
ZFS_AC_KERNEL_SRC_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_SRC_RWSEM
ZFS_AC_KERNEL_SRC_SCHED
ZFS_AC_KERNEL_SRC_USLEEP_RANGE
ZFS_AC_KERNEL_SRC_KMEM_CACHE
ZFS_AC_KERNEL_SRC_KVMALLOC
ZFS_AC_KERNEL_SRC_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_SRC_WAIT
ZFS_AC_KERNEL_SRC_INODE_TIMES
ZFS_AC_KERNEL_SRC_INODE_LOCK
ZFS_AC_KERNEL_SRC_GROUP_INFO_GID
ZFS_AC_KERNEL_SRC_RW
ZFS_AC_KERNEL_SRC_TIMER_SETUP
ZFS_AC_KERNEL_SRC_SUPER_USER_NS
ZFS_AC_KERNEL_SRC_PROC_OPERATIONS
ZFS_AC_KERNEL_SRC_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_SRC_BIO
ZFS_AC_KERNEL_SRC_BLKDEV
ZFS_AC_KERNEL_SRC_BLK_QUEUE
ZFS_AC_KERNEL_SRC_REVALIDATE_DISK
ZFS_AC_KERNEL_SRC_GET_DISK_RO
ZFS_AC_KERNEL_SRC_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_SRC_DISCARD_GRANULARITY
ZFS_AC_KERNEL_SRC_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_SRC_XATTR
ZFS_AC_KERNEL_SRC_ACL
ZFS_AC_KERNEL_SRC_INODE_GETATTR
ZFS_AC_KERNEL_SRC_INODE_SET_FLAGS
ZFS_AC_KERNEL_SRC_INODE_SET_IVERSION
ZFS_AC_KERNEL_SRC_SHOW_OPTIONS
ZFS_AC_KERNEL_SRC_FILE_INODE
ZFS_AC_KERNEL_SRC_FILE_DENTRY
ZFS_AC_KERNEL_SRC_FSYNC
ZFS_AC_KERNEL_SRC_AIO_FSYNC
ZFS_AC_KERNEL_SRC_EVICT_INODE
ZFS_AC_KERNEL_SRC_DIRTY_INODE
ZFS_AC_KERNEL_SRC_SHRINKER
ZFS_AC_KERNEL_SRC_MKDIR
ZFS_AC_KERNEL_SRC_LOOKUP_FLAGS
ZFS_AC_KERNEL_SRC_CREATE
ZFS_AC_KERNEL_SRC_GET_LINK
ZFS_AC_KERNEL_SRC_PUT_LINK
ZFS_AC_KERNEL_SRC_TMPFILE
ZFS_AC_KERNEL_SRC_AUTOMOUNT
ZFS_AC_KERNEL_SRC_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_SRC_COMMIT_METADATA
ZFS_AC_KERNEL_SRC_CLEAR_INODE
ZFS_AC_KERNEL_SRC_SETATTR_PREPARE
ZFS_AC_KERNEL_SRC_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_SRC_DENTRY
ZFS_AC_KERNEL_SRC_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SRC_SECURITY_INODE
ZFS_AC_KERNEL_SRC_FST_MOUNT
ZFS_AC_KERNEL_SRC_BDI
ZFS_AC_KERNEL_SRC_SET_NLINK
ZFS_AC_KERNEL_SRC_SGET
ZFS_AC_KERNEL_SRC_LSEEK_EXECUTE
ZFS_AC_KERNEL_SRC_VFS_GETATTR
ZFS_AC_KERNEL_SRC_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_SRC_VFS_ITERATE
ZFS_AC_KERNEL_SRC_VFS_DIRECT_IO
ZFS_AC_KERNEL_SRC_VFS_RW_ITERATE
ZFS_AC_KERNEL_SRC_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_SRC_VFS_IOV_ITER
ZFS_AC_KERNEL_SRC_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_SRC_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_SRC_MAKE_REQUEST_FN
ZFS_AC_KERNEL_SRC_GENERIC_IO_ACCT
ZFS_AC_KERNEL_SRC_FPU
ZFS_AC_KERNEL_SRC_FMODE_T
ZFS_AC_KERNEL_SRC_KUIDGID_T
ZFS_AC_KERNEL_SRC_KUID_HELPERS
ZFS_AC_KERNEL_SRC_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_SRC_RENAME
ZFS_AC_KERNEL_SRC_CURRENT_TIME
ZFS_AC_KERNEL_SRC_USERNS_CAPABILITIES
ZFS_AC_KERNEL_SRC_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_SRC_KTIME
ZFS_AC_KERNEL_SRC_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_SRC_TOTALHIGH_PAGES
ZFS_AC_KERNEL_SRC_KSTRTOUL
ZFS_AC_KERNEL_SRC_PERCPU
ZFS_AC_KERNEL_SRC_CPU_HOTPLUG
ZFS_AC_KERNEL_SRC_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_SRC_MKNOD
ZFS_AC_KERNEL_SRC_SYMLINK
ZFS_AC_KERNEL_SRC_BIO_MAX_SEGS
ZFS_AC_KERNEL_SRC_SIGNAL_STOP
ZFS_AC_KERNEL_SRC_SIGINFO
ZFS_AC_KERNEL_SRC_SET_SPECIAL_STATE
ZFS_AC_KERNEL_SRC_VFS_SET_PAGE_DIRTY_NOBUFFERS
+ ZFS_AC_KERNEL_SRC_STANDALONE_LINUX_STDARG
AC_MSG_CHECKING([for available kernel interfaces])
ZFS_LINUX_TEST_COMPILE_ALL([kabi])
AC_MSG_RESULT([done])
])
dnl #
dnl # Check results of kernel interface tests.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_RESULT], [
ZFS_AC_KERNEL_ACCESS_OK_TYPE
ZFS_AC_KERNEL_GLOBAL_PAGE_STATE
ZFS_AC_KERNEL_OBJTOOL
ZFS_AC_KERNEL_PDE_DATA
ZFS_AC_KERNEL_FALLOCATE
ZFS_AC_KERNEL_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE
ZFS_AC_KERNEL_RWSEM
ZFS_AC_KERNEL_SCHED
ZFS_AC_KERNEL_USLEEP_RANGE
ZFS_AC_KERNEL_KMEM_CACHE
ZFS_AC_KERNEL_KVMALLOC
ZFS_AC_KERNEL_VMALLOC_PAGE_KERNEL
ZFS_AC_KERNEL_WAIT
ZFS_AC_KERNEL_INODE_TIMES
ZFS_AC_KERNEL_INODE_LOCK
ZFS_AC_KERNEL_GROUP_INFO_GID
ZFS_AC_KERNEL_RW
ZFS_AC_KERNEL_TIMER_SETUP
ZFS_AC_KERNEL_SUPER_USER_NS
ZFS_AC_KERNEL_PROC_OPERATIONS
ZFS_AC_KERNEL_BLOCK_DEVICE_OPERATIONS
ZFS_AC_KERNEL_BIO
ZFS_AC_KERNEL_BLKDEV
ZFS_AC_KERNEL_BLK_QUEUE
ZFS_AC_KERNEL_REVALIDATE_DISK
ZFS_AC_KERNEL_GET_DISK_RO
ZFS_AC_KERNEL_GENERIC_READLINK_GLOBAL
ZFS_AC_KERNEL_DISCARD_GRANULARITY
ZFS_AC_KERNEL_INODE_OWNER_OR_CAPABLE
ZFS_AC_KERNEL_XATTR
ZFS_AC_KERNEL_ACL
ZFS_AC_KERNEL_INODE_GETATTR
ZFS_AC_KERNEL_INODE_SET_FLAGS
ZFS_AC_KERNEL_INODE_SET_IVERSION
ZFS_AC_KERNEL_SHOW_OPTIONS
ZFS_AC_KERNEL_FILE_INODE
ZFS_AC_KERNEL_FILE_DENTRY
ZFS_AC_KERNEL_FSYNC
ZFS_AC_KERNEL_AIO_FSYNC
ZFS_AC_KERNEL_EVICT_INODE
ZFS_AC_KERNEL_DIRTY_INODE
ZFS_AC_KERNEL_SHRINKER
ZFS_AC_KERNEL_MKDIR
ZFS_AC_KERNEL_LOOKUP_FLAGS
ZFS_AC_KERNEL_CREATE
ZFS_AC_KERNEL_GET_LINK
ZFS_AC_KERNEL_PUT_LINK
ZFS_AC_KERNEL_TMPFILE
ZFS_AC_KERNEL_AUTOMOUNT
ZFS_AC_KERNEL_ENCODE_FH_WITH_INODE
ZFS_AC_KERNEL_COMMIT_METADATA
ZFS_AC_KERNEL_CLEAR_INODE
ZFS_AC_KERNEL_SETATTR_PREPARE
ZFS_AC_KERNEL_INSERT_INODE_LOCKED
ZFS_AC_KERNEL_DENTRY
ZFS_AC_KERNEL_TRUNCATE_SETSIZE
ZFS_AC_KERNEL_SECURITY_INODE
ZFS_AC_KERNEL_FST_MOUNT
ZFS_AC_KERNEL_BDI
ZFS_AC_KERNEL_SET_NLINK
ZFS_AC_KERNEL_SGET
ZFS_AC_KERNEL_LSEEK_EXECUTE
ZFS_AC_KERNEL_VFS_GETATTR
ZFS_AC_KERNEL_VFS_FSYNC_2ARGS
ZFS_AC_KERNEL_VFS_ITERATE
ZFS_AC_KERNEL_VFS_DIRECT_IO
ZFS_AC_KERNEL_VFS_RW_ITERATE
ZFS_AC_KERNEL_VFS_GENERIC_WRITE_CHECKS
ZFS_AC_KERNEL_VFS_IOV_ITER
ZFS_AC_KERNEL_KMAP_ATOMIC_ARGS
ZFS_AC_KERNEL_FOLLOW_DOWN_ONE
ZFS_AC_KERNEL_MAKE_REQUEST_FN
ZFS_AC_KERNEL_GENERIC_IO_ACCT
ZFS_AC_KERNEL_FPU
ZFS_AC_KERNEL_FMODE_T
ZFS_AC_KERNEL_KUIDGID_T
ZFS_AC_KERNEL_KUID_HELPERS
ZFS_AC_KERNEL_MODULE_PARAM_CALL_CONST
ZFS_AC_KERNEL_RENAME
ZFS_AC_KERNEL_CURRENT_TIME
ZFS_AC_KERNEL_USERNS_CAPABILITIES
ZFS_AC_KERNEL_IN_COMPAT_SYSCALL
ZFS_AC_KERNEL_KTIME
ZFS_AC_KERNEL_TOTALRAM_PAGES_FUNC
ZFS_AC_KERNEL_TOTALHIGH_PAGES
ZFS_AC_KERNEL_KSTRTOUL
ZFS_AC_KERNEL_PERCPU
ZFS_AC_KERNEL_CPU_HOTPLUG
ZFS_AC_KERNEL_GENERIC_FILLATTR_USERNS
ZFS_AC_KERNEL_MKNOD
ZFS_AC_KERNEL_SYMLINK
ZFS_AC_KERNEL_BIO_MAX_SEGS
ZFS_AC_KERNEL_SIGNAL_STOP
ZFS_AC_KERNEL_SIGINFO
ZFS_AC_KERNEL_SET_SPECIAL_STATE
ZFS_AC_KERNEL_VFS_SET_PAGE_DIRTY_NOBUFFERS
+ ZFS_AC_KERNEL_STANDALONE_LINUX_STDARG
])
dnl #
dnl # Detect name used for Module.symvers file in kernel
dnl #
AC_DEFUN([ZFS_AC_MODULE_SYMVERS], [
modpost=$LINUX/scripts/Makefile.modpost
AC_MSG_CHECKING([kernel file name for module symbols])
AS_IF([test "x$enable_linux_builtin" != xyes -a -f "$modpost"], [
AS_IF([grep -q Modules.symvers $modpost], [
LINUX_SYMBOLS=Modules.symvers
], [
LINUX_SYMBOLS=Module.symvers
])
AS_IF([test ! -f "$LINUX_OBJ/$LINUX_SYMBOLS"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed. If you are building with a custom kernel, make sure
*** the kernel is configured, built, and the '--with-linux=PATH'
*** configure option refers to the location of the kernel source.
])
])
], [
LINUX_SYMBOLS=NONE
])
AC_MSG_RESULT($LINUX_SYMBOLS)
AC_SUBST(LINUX_SYMBOLS)
])
dnl #
dnl # Detect the kernel to be built against
dnl #
AC_DEFUN([ZFS_AC_KERNEL], [
AC_ARG_WITH([linux],
AS_HELP_STRING([--with-linux=PATH],
[Path to kernel source]),
[kernelsrc="$withval"])
AC_ARG_WITH(linux-obj,
AS_HELP_STRING([--with-linux-obj=PATH],
[Path to kernel build objects]),
[kernelbuild="$withval"])
AC_MSG_CHECKING([kernel source directory])
AS_IF([test -z "$kernelsrc"], [
AS_IF([test -e "/lib/modules/$(uname -r)/source"], [
headersdir="/lib/modules/$(uname -r)/source"
sourcelink=$(readlink -f "$headersdir")
], [test -e "/lib/modules/$(uname -r)/build"], [
headersdir="/lib/modules/$(uname -r)/build"
sourcelink=$(readlink -f "$headersdir")
], [
sourcelink=$(ls -1d /usr/src/kernels/* \
/usr/src/linux-* \
2>/dev/null | grep -v obj | tail -1)
])
AS_IF([test -n "$sourcelink" && test -e ${sourcelink}], [
kernelsrc=`readlink -f ${sourcelink}`
], [
kernelsrc="[Not found]"
])
], [
AS_IF([test "$kernelsrc" = "NONE"], [
kernsrcver=NONE
])
withlinux=yes
])
AC_MSG_RESULT([$kernelsrc])
AS_IF([test ! -d "$kernelsrc"], [
AC_MSG_ERROR([
*** Please make sure the kernel devel package for your distribution
*** is installed and then try again. If that fails, you can specify the
*** location of the kernel source with the '--with-linux=PATH' option.])
])
AC_MSG_CHECKING([kernel build directory])
AS_IF([test -z "$kernelbuild"], [
AS_IF([test x$withlinux != xyes -a -e "/lib/modules/$(uname -r)/build"], [
kernelbuild=`readlink -f /lib/modules/$(uname -r)/build`
], [test -d ${kernelsrc}-obj/${target_cpu}/${target_cpu}], [
kernelbuild=${kernelsrc}-obj/${target_cpu}/${target_cpu}
], [test -d ${kernelsrc}-obj/${target_cpu}/default], [
kernelbuild=${kernelsrc}-obj/${target_cpu}/default
], [test -d `dirname ${kernelsrc}`/build-${target_cpu}], [
kernelbuild=`dirname ${kernelsrc}`/build-${target_cpu}
], [
kernelbuild=${kernelsrc}
])
])
AC_MSG_RESULT([$kernelbuild])
AC_MSG_CHECKING([kernel source version])
utsrelease1=$kernelbuild/include/linux/version.h
utsrelease2=$kernelbuild/include/linux/utsrelease.h
utsrelease3=$kernelbuild/include/generated/utsrelease.h
AS_IF([test -r $utsrelease1 && fgrep -q UTS_RELEASE $utsrelease1], [
utsrelease=$utsrelease1
], [test -r $utsrelease2 && fgrep -q UTS_RELEASE $utsrelease2], [
utsrelease=$utsrelease2
], [test -r $utsrelease3 && fgrep -q UTS_RELEASE $utsrelease3], [
utsrelease=$utsrelease3
])
AS_IF([test -n "$utsrelease"], [
kernsrcver=$($AWK '/UTS_RELEASE/ { gsub(/"/, "", $[3]); print $[3] }' $utsrelease)
AS_IF([test -z "$kernsrcver"], [
AC_MSG_RESULT([Not found])
AC_MSG_ERROR([
*** Cannot determine kernel version.
])
])
], [
AC_MSG_RESULT([Not found])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
])
else
AC_MSG_ERROR([
*** Cannot find UTS_RELEASE definition.
*** Please run 'make prepare' inside the kernel source tree.])
fi
])
AC_MSG_RESULT([$kernsrcver])
AS_VERSION_COMPARE([$kernsrcver], [$ZFS_META_KVER_MIN], [
AC_MSG_ERROR([
*** Cannot build against kernel version $kernsrcver.
*** The minimum supported kernel version is $ZFS_META_KVER_MIN.
])
])
LINUX=${kernelsrc}
LINUX_OBJ=${kernelbuild}
LINUX_VERSION=${kernsrcver}
AC_SUBST(LINUX)
AC_SUBST(LINUX_OBJ)
AC_SUBST(LINUX_VERSION)
ZFS_AC_MODULE_SYMVERS
])
dnl #
dnl # Detect the QAT module to be built against, QAT provides hardware
dnl # acceleration for data compression:
dnl #
dnl # https://01.org/intel-quickassist-technology
dnl #
dnl # 1) Download and install QAT driver from the above link
dnl # 2) Start QAT driver in your system:
dnl # service qat_service start
dnl # 3) Enable QAT in ZFS, e.g.:
dnl # ./configure --with-qat=<qat-driver-path>/QAT1.6
dnl # make
dnl # 4) Set GZIP compression in ZFS dataset:
dnl # zfs set compression=gzip <dataset>
dnl #
dnl # Then the data written to this ZFS pool is compressed by the QAT
dnl # accelerator automatically, and decompressed by QAT when read from
dnl # the pool.
dnl #
dnl # 1) Get QAT hardware statistics with:
dnl # cat /proc/icp_dh895xcc_dev/qat
dnl # 2) To disable QAT:
dnl # insmod zfs.ko zfs_qat_disable=1
dnl #
AC_DEFUN([ZFS_AC_QAT], [
AC_ARG_WITH([qat],
AS_HELP_STRING([--with-qat=PATH],
[Path to qat source]),
AS_IF([test "$withval" = "yes"],
AC_MSG_ERROR([--with-qat=PATH requires a PATH]),
[qatsrc="$withval"]))
AC_ARG_WITH([qat-obj],
AS_HELP_STRING([--with-qat-obj=PATH],
[Path to qat build objects]),
[qatbuild="$withval"])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat source directory])
AC_MSG_RESULT([$qatsrc])
QAT_SRC="${qatsrc}/quickassist"
AS_IF([ test ! -e "$QAT_SRC/include/cpa.h"], [
AC_MSG_ERROR([
*** Please make sure the qat driver package is installed
*** and specify the location of the qat source with the
*** '--with-qat=PATH' option then try again. Failed to
*** find cpa.h in:
${QAT_SRC}/include])
])
])
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat build directory])
AS_IF([test -z "$qatbuild"], [
qatbuild="${qatsrc}/build"
])
AC_MSG_RESULT([$qatbuild])
QAT_OBJ=${qatbuild}
AS_IF([ ! test -e "$QAT_OBJ/icp_qa_al.ko" && ! test -e "$QAT_OBJ/qat_api.ko"], [
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find icp_qa_al.ko or qat_api.ko in:
$QAT_OBJ])
])
AC_SUBST(QAT_SRC)
AC_SUBST(QAT_OBJ)
AC_DEFINE(HAVE_QAT, 1,
[qat is enabled and exists])
])
dnl #
dnl # Detect the name used for the QAT Module.symvers file.
dnl #
AS_IF([test ! -z "${qatsrc}"], [
AC_MSG_CHECKING([qat file for module symbols])
QAT_SYMBOLS=$QAT_SRC/lookaside/access_layer/src/Module.symvers
AS_IF([test -r $QAT_SYMBOLS], [
AC_MSG_RESULT([$QAT_SYMBOLS])
AC_SUBST(QAT_SYMBOLS)
],[
AC_MSG_ERROR([
*** Please make sure the qat driver is installed then try again.
*** Failed to find Module.symvers in:
$QAT_SYMBOLS
])
])
])
])
dnl #
dnl # Basic toolchain sanity check.
dnl #
AC_DEFUN([ZFS_AC_KERNEL_TEST_MODULE], [
AC_MSG_CHECKING([whether modules can be built])
ZFS_LINUX_TRY_COMPILE([], [], [
AC_MSG_RESULT([yes])
],[
AC_MSG_RESULT([no])
if test "x$enable_linux_builtin" != xyes; then
AC_MSG_ERROR([
*** Unable to build an empty module.
])
else
AC_MSG_ERROR([
*** Unable to build an empty module.
*** Please run 'make scripts' inside the kernel source tree.])
fi
])
])
dnl #
dnl # ZFS_LINUX_CONFTEST_H
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_H], [
test -d build/$2 || mkdir -p build/$2
cat - <<_ACEOF >build/$2/$2.h
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_C
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_C], [
test -d build/$2 || mkdir -p build/$2
cat confdefs.h - <<_ACEOF >build/$2/$2.c
$1
_ACEOF
])
dnl #
dnl # ZFS_LINUX_CONFTEST_MAKEFILE
dnl #
dnl # $1 - test case name
dnl # $2 - add to top-level Makefile
dnl # $3 - additional build flags
dnl #
AC_DEFUN([ZFS_LINUX_CONFTEST_MAKEFILE], [
test -d build || mkdir -p build
test -d build/$1 || mkdir -p build/$1
file=build/$1/Makefile
dnl # Example command line to manually build source.
cat - <<_ACEOF >$file
# Example command line to manually build source
# make modules -C $LINUX_OBJ $ARCH_UM M=$PWD/build/$1
ccflags-y := -Werror $FRAME_LARGER_THAN
_ACEOF
dnl # Additional custom CFLAGS as requested.
m4_ifval($3, [echo "ccflags-y += $3" >>$file], [])
dnl # Test case source
echo "obj-m := $1.o" >>$file
AS_IF([test "x$2" = "xyes"], [echo "obj-m += $1/" >>build/Makefile], [])
])
dnl #
dnl # ZFS_LINUX_TEST_PROGRAM(C)([PROLOGUE], [BODY])
dnl #
m4_define([ZFS_LINUX_TEST_PROGRAM], [
#include <linux/module.h>
$1
int
main (void)
{
$2
;
return 0;
}
MODULE_DESCRIPTION("conftest");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
MODULE_LICENSE($3);
])
dnl #
dnl # ZFS_LINUX_TEST_REMOVE
dnl #
dnl # Removes the specified test source and results.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_REMOVE], [
test -d build/$1 && rm -Rf build/$1
test -f build/Makefile && sed '/$1/d' build/Makefile
])
dnl #
dnl # ZFS_LINUX_COMPILE
dnl #
dnl # $1 - build dir
dnl # $2 - test command
dnl # $3 - pass command
dnl # $4 - fail command
dnl # $5 - set KBUILD_MODPOST_NOFINAL='yes'
dnl # $6 - set KBUILD_MODPOST_WARN='yes'
dnl #
dnl # Used internally by ZFS_LINUX_TEST_{COMPILE,MODPOST}
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE], [
AC_TRY_COMMAND([
KBUILD_MODPOST_NOFINAL="$5" KBUILD_MODPOST_WARN="$6"
make modules -k -j$TEST_JOBS -C $LINUX_OBJ $ARCH_UM
M=$PWD/$1 >$1/build.log 2>&1])
AS_IF([AC_TRY_COMMAND([$2])], [$3], [$4])
])
dnl #
dnl # ZFS_LINUX_TEST_COMPILE
dnl #
dnl # Perform a full compile excluding the final modpost phase.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.compile.$1
mv $2/build.log $2/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to compile test source to determine kernel interfaces.])
], [yes], [])
])
dnl #
dnl # ZFS_LINUX_TEST_MODPOST
dnl #
dnl # Perform a full compile including the modpost phase. This may
dnl # be an incremental build if the objects have already been built.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_MODPOST], [
ZFS_LINUX_COMPILE([$2], [test -f $2/build.log], [
mv $2/Makefile $2/Makefile.modpost.$1
cat $2/build.log >>build/build.log.$1
],[
AC_MSG_ERROR([
*** Unable to modpost test source to determine kernel interfaces.])
], [], [yes])
])
dnl #
dnl # Perform the compilation of the test cases in two phases.
dnl #
dnl # Phase 1) attempt to build the object files for all of the tests
dnl # defined by the ZFS_LINUX_TEST_SRC macro. But do not
dnl # perform the final modpost stage.
dnl #
dnl # Phase 2) disable all tests which failed the initial compilation,
dnl # then invoke the final modpost step for the remaining tests.
dnl #
dnl # This allows us to efficiently build the test cases in parallel while
dnl # remaining resilient to build failures which are expected when
dnl # detecting the available kernel interfaces.
dnl #
dnl # The maximum allowed parallelism can be controlled by setting the
dnl # TEST_JOBS environment variable. Otherwise, it defaults to $(nproc).
dnl #
AC_DEFUN([ZFS_LINUX_TEST_COMPILE_ALL], [
dnl # Phase 1 - Compilation only, final linking is skipped.
ZFS_LINUX_TEST_COMPILE([$1], [build])
dnl #
dnl # Phase 2 - When building external modules disable test cases
dnl # which failed to compile and invoke modpost to verify the
dnl # final linking.
dnl #
dnl # Test names suffixed with '_license' call modpost independently
dnl # to ensure that a single incompatibility does not result in the
dnl # modpost phase exiting early. This check is not performed on
dnl # every symbol since the majority are compatible and doing so
dnl # would significantly slow down this phase.
dnl #
dnl # When configuring for builtin (--enable-linux-builtin)
dnl # fake the linking step and artificially create the expected .ko
dnl # files for tests which did compile. This is required for
dnl # kernels which do not have loadable module support or have
dnl # not yet been built.
dnl #
AS_IF([test "x$enable_linux_builtin" = "xno"], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
AS_IF([test "${name##*_}" = "license"], [
ZFS_LINUX_TEST_MODPOST([$1],
[build/$name])
echo "obj-n += $dir" >>build/Makefile
], [
echo "obj-m += $dir" >>build/Makefile
])
], [
echo "obj-n += $dir" >>build/Makefile
])
done
ZFS_LINUX_TEST_MODPOST([$1], [build])
], [
for dir in $(awk '/^obj-m/ { print [$]3 }' \
build/Makefile.compile.$1); do
name=${dir%/}
AS_IF([test -f build/$name/$name.o], [
touch build/$name/$name.ko
])
done
])
])
dnl #
dnl # ZFS_LINUX_TEST_SRC
dnl #
dnl # $1 - name
dnl # $2 - global
dnl # $3 - source
dnl # $4 - extra cflags
dnl # $5 - check license-compatibility
dnl #
dnl # Check if the test source is buildable at all and then if it is
dnl # license compatible.
dnl #
dnl # N.B. because all of the test cases are compiled in parallel they
dnl # must never depend on the results of previous tests. Each test
dnl # needs to be entirely independent.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_SRC], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM([[$2]], [[$3]],
[["Dual BSD/GPL"]])], [$1])
ZFS_LINUX_CONFTEST_MAKEFILE([$1], [yes], [$4])
AS_IF([ test -n "$5" ], [
ZFS_LINUX_CONFTEST_C([ZFS_LINUX_TEST_PROGRAM(
[[$2]], [[$3]], [[$5]])], [$1_license])
ZFS_LINUX_CONFTEST_MAKEFILE([$1_license], [yes], [$4])
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT
dnl #
dnl # $1 - name of a test source (ZFS_LINUX_TEST_SRC)
dnl # $2 - run on success (valid .ko generated)
dnl # $3 - run on failure (unable to compile)
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT], [
AS_IF([test -d build/$1], [
AS_IF([test -f build/$1/$1.ko], [$2], [$3])
], [
AC_MSG_ERROR([
*** No matching source for the "$1" test, check that
*** both the test source and result macros refer to the same name.
])
])
])
dnl #
dnl # ZFS_LINUX_TEST_ERROR
dnl #
dnl # Generic error message which can be used when none of the expected
dnl # kernel interfaces were detected.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_ERROR], [
AC_MSG_ERROR([
*** None of the expected "$1" interfaces were detected.
*** This may be because your kernel version is newer than what is
*** supported, or you are using a patched custom kernel with
*** incompatible modifications.
***
*** ZFS Version: $ZFS_META_ALIAS
*** Compatible Kernels: $ZFS_META_KVER_MIN - $ZFS_META_KVER_MAX
])
])
dnl #
dnl # ZFS_LINUX_TEST_RESULT_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TEST_RESULT except ZFS_CHECK_SYMBOL_EXPORT is called to
dnl # verify symbol exports, unless --enable-linux-builtin was provided to
dnl # configure.
dnl #
AC_DEFUN([ZFS_LINUX_TEST_RESULT_SYMBOL], [
AS_IF([ ! test -f build/$1/$1.ko], [
$5
], [
AS_IF([test "x$enable_linux_builtin" != "xyes"], [
ZFS_CHECK_SYMBOL_EXPORT([$2], [$3], [$4], [$5])
], [
$4
])
])
])
dnl #
dnl # ZFS_LINUX_COMPILE_IFELSE
dnl #
AC_DEFUN([ZFS_LINUX_COMPILE_IFELSE], [
ZFS_LINUX_TEST_REMOVE([conftest])
m4_ifvaln([$1], [ZFS_LINUX_CONFTEST_C([$1], [conftest])])
m4_ifvaln([$5], [ZFS_LINUX_CONFTEST_H([$5], [conftest])],
[ZFS_LINUX_CONFTEST_H([], [conftest])])
ZFS_LINUX_CONFTEST_MAKEFILE([conftest], [no],
[m4_ifvaln([$5], [-I$PWD/build/conftest], [])])
ZFS_LINUX_COMPILE([build/conftest], [$2], [$3], [$4], [], [])
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE
dnl #
dnl # $1 - global
dnl # $2 - source
dnl # $3 - run on success (valid .ko generated)
dnl # $4 - run on failure (unable to compile)
dnl #
dnl # When configuring as builtin (--enable-linux-builtin) for kernels
dnl # without loadable module support (CONFIG_MODULES=n) only the object
dnl # file is created. See ZFS_LINUX_TEST_COMPILE_ALL for details.
dnl #
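dnl # A minimal usage sketch; the header and statement are placeholders and
dnl # only illustrate the global/source/success/failure arguments:
dnl #
dnl #     ZFS_LINUX_TRY_COMPILE([
dnl #             #include <linux/module.h>
dnl #     ], [
dnl #             int x __attribute__ ((unused)) = 0;
dnl #     ], [
dnl #             AC_MSG_RESULT([yes])
dnl #     ], [
dnl #             AC_MSG_RESULT([no])
dnl #     ])
dnl #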
AC_DEFUN([ZFS_LINUX_TRY_COMPILE], [
AS_IF([test "x$enable_linux_builtin" = "xyes"], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.o], [$3], [$4])
], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]],
[[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko], [$3], [$4])
])
])
dnl #
dnl # ZFS_CHECK_SYMBOL_EXPORT
dnl #
dnl # Check if a symbol is exported or not by consulting the symbols
dnl # file, or optionally the source code.
dnl #
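dnl # A usage sketch; the symbol and source file are hypothetical.  $1 is the
dnl # symbol name, $2 an optional list of sources to search for an
dnl # EXPORT_SYMBOL, and $3/$4 run when the symbol is or is not exported:
dnl #
dnl #     ZFS_CHECK_SYMBOL_EXPORT([example_symbol], [fs/example.c], [
dnl #             AC_DEFINE(HAVE_EXAMPLE_SYMBOL, 1, [example_symbol is exported])
dnl #     ], [])
dnl #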
AC_DEFUN([ZFS_CHECK_SYMBOL_EXPORT], [
grep -q -E '[[[:space:]]]$1[[[:space:]]]' \
$LINUX_OBJ/$LINUX_SYMBOLS 2>/dev/null
rc=$?
if test $rc -ne 0; then
export=0
for file in $2; do
grep -q -E "EXPORT_SYMBOL.*($1)" \
"$LINUX/$file" 2>/dev/null
rc=$?
if test $rc -eq 0; then
export=1
break;
fi
done
if test $export -eq 0; then :
$4
else :
$3
fi
else :
$3
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_SYMBOL
dnl #
dnl # Like ZFS_LINUX_TRY_COMPILE except ZFS_CHECK_SYMBOL_EXPORT is called
dnl # to verify symbol exports, unless --enable-linux-builtin was provided
dnl # to configure.
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_SYMBOL], [
ZFS_LINUX_TRY_COMPILE([$1], [$2], [rc=0], [rc=1])
if test $rc -ne 0; then :
$6
else
if test "x$enable_linux_builtin" != xyes; then
ZFS_CHECK_SYMBOL_EXPORT([$3], [$4], [rc=0], [rc=1])
fi
if test $rc -ne 0; then :
$6
else :
$5
fi
fi
])
dnl #
dnl # ZFS_LINUX_TRY_COMPILE_HEADER
dnl # Like ZFS_LINUX_TRY_COMPILE, except the contents of conftest.h are
dnl # provided via the fifth parameter
dnl #
AC_DEFUN([ZFS_LINUX_TRY_COMPILE_HEADER], [
ZFS_LINUX_COMPILE_IFELSE(
[ZFS_LINUX_TEST_PROGRAM([[$1]], [[$2]], [[ZFS_META_LICENSE]])],
[test -f build/conftest/conftest.ko],
[$3], [$4], [$5])
])
diff --git a/sys/contrib/openzfs/config/zfs-build.m4 b/sys/contrib/openzfs/config/zfs-build.m4
index 1af4356cde19..27041c054c26 100644
--- a/sys/contrib/openzfs/config/zfs-build.m4
+++ b/sys/contrib/openzfs/config/zfs-build.m4
@@ -1,625 +1,626 @@
AC_DEFUN([ZFS_AC_LICENSE], [
AC_MSG_CHECKING([zfs author])
AC_MSG_RESULT([$ZFS_META_AUTHOR])
AC_MSG_CHECKING([zfs license])
AC_MSG_RESULT([$ZFS_META_LICENSE])
])
AC_DEFUN([ZFS_AC_DEBUG_ENABLE], [
DEBUG_CFLAGS="-Werror"
DEBUG_CPPFLAGS="-DDEBUG -UNDEBUG"
DEBUG_LDFLAGS=""
DEBUG_ZFS="_with_debug"
WITH_DEBUG="true"
AC_DEFINE(ZFS_DEBUG, 1, [zfs debugging enabled])
KERNEL_DEBUG_CFLAGS="-Werror"
KERNEL_DEBUG_CPPFLAGS="-DDEBUG -UNDEBUG"
])
AC_DEFUN([ZFS_AC_DEBUG_DISABLE], [
DEBUG_CFLAGS=""
DEBUG_CPPFLAGS="-UDEBUG -DNDEBUG"
DEBUG_LDFLAGS=""
DEBUG_ZFS="_without_debug"
WITH_DEBUG=""
KERNEL_DEBUG_CFLAGS=""
KERNEL_DEBUG_CPPFLAGS="-UDEBUG -DNDEBUG"
])
dnl #
dnl # When debugging is enabled:
dnl # - Enable all ASSERTs (-DDEBUG)
dnl # - Promote all compiler warnings to errors (-Werror)
dnl #
dnl # (If INVARIANTS is detected, we need to force DEBUG, or strange panics
dnl # can ensue.)
dnl #
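dnl # For example, both behaviors are selected with:
dnl #
dnl #     ./configure --enable-debug
dnl #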
AC_DEFUN([ZFS_AC_DEBUG], [
AC_MSG_CHECKING([whether assertion support will be enabled])
AC_ARG_ENABLE([debug],
[AS_HELP_STRING([--enable-debug],
[Enable compiler and code assertions @<:@default=no@:>@])],
[],
[enable_debug=no])
AS_CASE(["x$enable_debug"],
["xyes"],
[ZFS_AC_DEBUG_ENABLE],
["xno"],
[ZFS_AC_DEBUG_DISABLE],
[AC_MSG_ERROR([Unknown option $enable_debug])])
AS_CASE(["x$enable_invariants"],
["xyes"],
[],
["xno"],
[],
[ZFS_AC_DEBUG_INVARIANTS_DETECT])
AS_CASE(["x$enable_invariants"],
["xyes"],
[ZFS_AC_DEBUG_ENABLE],
["xno"],
[],
[AC_MSG_ERROR([Unknown option $enable_invariants])])
AC_SUBST(DEBUG_CFLAGS)
AC_SUBST(DEBUG_CPPFLAGS)
AC_SUBST(DEBUG_LDFLAGS)
AC_SUBST(DEBUG_ZFS)
AC_SUBST(WITH_DEBUG)
AC_SUBST(KERNEL_DEBUG_CFLAGS)
AC_SUBST(KERNEL_DEBUG_CPPFLAGS)
AC_MSG_RESULT([$enable_debug])
])
AC_DEFUN([ZFS_AC_DEBUGINFO_ENABLE], [
DEBUG_CFLAGS="$DEBUG_CFLAGS -g -fno-inline $NO_IPA_SRA"
KERNEL_DEBUG_CFLAGS="$KERNEL_DEBUG_CFLAGS -fno-inline $NO_IPA_SRA"
KERNEL_MAKE="$KERNEL_MAKE CONFIG_DEBUG_INFO=y"
DEBUGINFO_ZFS="_with_debuginfo"
])
AC_DEFUN([ZFS_AC_DEBUGINFO_DISABLE], [
DEBUGINFO_ZFS="_without_debuginfo"
])
AC_DEFUN([ZFS_AC_DEBUGINFO], [
AC_MSG_CHECKING([whether debuginfo support will be forced])
AC_ARG_ENABLE([debuginfo],
[AS_HELP_STRING([--enable-debuginfo],
[Force generation of debuginfo @<:@default=no@:>@])],
[],
[enable_debuginfo=no])
AS_CASE(["x$enable_debuginfo"],
["xyes"],
[ZFS_AC_DEBUGINFO_ENABLE],
["xno"],
[ZFS_AC_DEBUGINFO_DISABLE],
[AC_MSG_ERROR([Unknown option $enable_debuginfo])])
AC_SUBST(DEBUG_CFLAGS)
AC_SUBST(DEBUGINFO_ZFS)
AC_SUBST(KERNEL_DEBUG_CFLAGS)
AC_SUBST(KERNEL_MAKE)
AC_MSG_RESULT([$enable_debuginfo])
])
dnl #
dnl # Disabled by default, provides basic memory tracking. Track the total
dnl # number of bytes allocated with kmem_alloc() and freed with kmem_free().
dnl # Then at module unload time if any bytes were leaked it will be reported
dnl # on the console.
dnl #
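dnl # For example:
dnl #
dnl #     ./configure --enable-debug-kmem
dnl #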
AC_DEFUN([ZFS_AC_DEBUG_KMEM], [
AC_MSG_CHECKING([whether basic kmem accounting is enabled])
AC_ARG_ENABLE([debug-kmem],
[AS_HELP_STRING([--enable-debug-kmem],
[Enable basic kmem accounting @<:@default=no@:>@])],
[],
[enable_debug_kmem=no])
AS_IF([test "x$enable_debug_kmem" = xyes], [
KERNEL_DEBUG_CPPFLAGS="${KERNEL_DEBUG_CPPFLAGS} -DDEBUG_KMEM"
DEBUG_KMEM_ZFS="_with_debug_kmem"
], [
DEBUG_KMEM_ZFS="_without_debug_kmem"
])
AC_SUBST(KERNEL_DEBUG_CPPFLAGS)
AC_SUBST(DEBUG_KMEM_ZFS)
AC_MSG_RESULT([$enable_debug_kmem])
])
dnl #
dnl # Disabled by default, provides detailed memory tracking. This feature
dnl # also requires --enable-debug-kmem to be set. When enabled not only will
dnl # total bytes be tracked but also the location of every kmem_alloc() and
dnl # kmem_free(). When the module is unloaded a list of all leaked addresses
dnl # and where they were allocated will be dumped to the console. Enabling
dnl # this feature has a significant impact on performance but it makes finding
dnl # memory leaks straightforward.
dnl #
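dnl # For example, detailed tracking requires both options:
dnl #
dnl #     ./configure --enable-debug-kmem --enable-debug-kmem-tracking
dnl #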
AC_DEFUN([ZFS_AC_DEBUG_KMEM_TRACKING], [
AC_MSG_CHECKING([whether detailed kmem tracking is enabled])
AC_ARG_ENABLE([debug-kmem-tracking],
[AS_HELP_STRING([--enable-debug-kmem-tracking],
[Enable detailed kmem tracking @<:@default=no@:>@])],
[],
[enable_debug_kmem_tracking=no])
AS_IF([test "x$enable_debug_kmem_tracking" = xyes], [
KERNEL_DEBUG_CPPFLAGS="${KERNEL_DEBUG_CPPFLAGS} -DDEBUG_KMEM_TRACKING"
DEBUG_KMEM_TRACKING_ZFS="_with_debug_kmem_tracking"
], [
DEBUG_KMEM_TRACKING_ZFS="_without_debug_kmem_tracking"
])
AC_SUBST(KERNEL_DEBUG_CPPFLAGS)
AC_SUBST(DEBUG_KMEM_TRACKING_ZFS)
AC_MSG_RESULT([$enable_debug_kmem_tracking])
])
AC_DEFUN([ZFS_AC_DEBUG_INVARIANTS_DETECT_FREEBSD], [
AS_IF([sysctl -n kern.conftxt | fgrep -qx $'options\tINVARIANTS'],
[enable_invariants="yes"],
[enable_invariants="no"])
])
AC_DEFUN([ZFS_AC_DEBUG_INVARIANTS_DETECT], [
AM_COND_IF([BUILD_FREEBSD],
[ZFS_AC_DEBUG_INVARIANTS_DETECT_FREEBSD],
[enable_invariants="no"])
])
dnl #
dnl # Detected for the running kernel by default, enables INVARIANTS features
dnl # in the FreeBSD kernel module. This feature must be used when building
dnl # for a FreeBSD kernel with "options INVARIANTS" in the KERNCONF and must
dnl # not be used when the INVARIANTS option is absent.
dnl #
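dnl # For example, to override the detected value explicitly:
dnl #
dnl #     ./configure --enable-invariants
dnl #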
AC_DEFUN([ZFS_AC_DEBUG_INVARIANTS], [
AC_MSG_CHECKING([whether FreeBSD kernel INVARIANTS checks are enabled])
AC_ARG_ENABLE([invariants],
[AS_HELP_STRING([--enable-invariants],
[Enable FreeBSD kernel INVARIANTS checks [[default: detect]]])],
[], [ZFS_AC_DEBUG_INVARIANTS_DETECT])
AS_IF([test "x$enable_invariants" = xyes],
[WITH_INVARIANTS="true"],
[WITH_INVARIANTS=""])
AC_SUBST(WITH_INVARIANTS)
AC_MSG_RESULT([$enable_invariants])
])
AC_DEFUN([ZFS_AC_CONFIG_ALWAYS], [
AX_COUNT_CPUS([])
AC_SUBST(CPU_COUNT)
ZFS_AC_CONFIG_ALWAYS_CC_NO_UNUSED_BUT_SET_VARIABLE
ZFS_AC_CONFIG_ALWAYS_CC_NO_BOOL_COMPARE
+ ZFS_AC_CONFIG_ALWAYS_CC_IMPLICIT_FALLTHROUGH
ZFS_AC_CONFIG_ALWAYS_CC_FRAME_LARGER_THAN
ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_TRUNCATION
ZFS_AC_CONFIG_ALWAYS_CC_NO_FORMAT_ZERO_LENGTH
ZFS_AC_CONFIG_ALWAYS_CC_NO_OMIT_FRAME_POINTER
ZFS_AC_CONFIG_ALWAYS_CC_NO_IPA_SRA
ZFS_AC_CONFIG_ALWAYS_CC_ASAN
ZFS_AC_CONFIG_ALWAYS_TOOLCHAIN_SIMD
ZFS_AC_CONFIG_ALWAYS_SYSTEM
ZFS_AC_CONFIG_ALWAYS_ARCH
ZFS_AC_CONFIG_ALWAYS_PYTHON
ZFS_AC_CONFIG_ALWAYS_PYZFS
ZFS_AC_CONFIG_ALWAYS_SED
ZFS_AC_CONFIG_ALWAYS_CPPCHECK
ZFS_AC_CONFIG_ALWAYS_SHELLCHECK
])
AC_DEFUN([ZFS_AC_CONFIG], [
dnl # Remove the previous build test directory.
rm -Rf build
ZFS_CONFIG=all
AC_ARG_WITH([config],
AS_HELP_STRING([--with-config=CONFIG],
[Config file 'kernel|user|all|srpm']),
[ZFS_CONFIG="$withval"])
AC_ARG_ENABLE([linux-builtin],
[AS_HELP_STRING([--enable-linux-builtin],
[Configure for builtin in-tree kernel modules @<:@default=no@:>@])],
[],
[enable_linux_builtin=no])
AC_MSG_CHECKING([zfs config])
AC_MSG_RESULT([$ZFS_CONFIG]);
AC_SUBST(ZFS_CONFIG)
ZFS_AC_CONFIG_ALWAYS
AM_COND_IF([BUILD_LINUX], [
AC_ARG_VAR([TEST_JOBS], [simultaneous jobs during configure])
if test "x$ac_cv_env_TEST_JOBS_set" != "xset"; then
TEST_JOBS=$CPU_COUNT
fi
AC_SUBST(TEST_JOBS)
])
case "$ZFS_CONFIG" in
kernel) ZFS_AC_CONFIG_KERNEL ;;
user) ZFS_AC_CONFIG_USER ;;
all) ZFS_AC_CONFIG_USER
ZFS_AC_CONFIG_KERNEL ;;
srpm) ;;
*)
AC_MSG_RESULT([Error!])
AC_MSG_ERROR([Bad value "$ZFS_CONFIG" for --with-config,
use kernel|user|all|srpm]) ;;
esac
AM_CONDITIONAL([CONFIG_USER],
[test "$ZFS_CONFIG" = user -o "$ZFS_CONFIG" = all])
AM_CONDITIONAL([CONFIG_KERNEL],
[test "$ZFS_CONFIG" = kernel -o "$ZFS_CONFIG" = all] &&
[test "x$enable_linux_builtin" != xyes ])
AM_CONDITIONAL([CONFIG_QAT],
[test "$ZFS_CONFIG" = kernel -o "$ZFS_CONFIG" = all] &&
[test "x$qatsrc" != x ])
AM_CONDITIONAL([WANT_DEVNAME2DEVID], [test "x$user_libudev" = xyes ])
AM_CONDITIONAL([WANT_MMAP_LIBAIO], [test "x$user_libaio" = xyes ])
AM_CONDITIONAL([PAM_ZFS_ENABLED], [test "x$enable_pam" = xyes])
])
dnl #
dnl # Check for rpm+rpmbuild to build RPM packages. If these tools
dnl # are missing it is non-fatal but you will not be able to build
dnl # RPM packages and will be warned if you try to.
dnl #
dnl # By default the generic spec file will be used because it requires
dnl # minimal dependencies. Distribution specific spec files can be
dnl # placed under the 'rpm/<distribution>' directory and enabled using
dnl # the --with-spec=<distribution> configure option.
dnl #
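dnl # For example, to use the Red Hat specific spec files:
dnl #
dnl #     ./configure --with-spec=redhat
dnl #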
AC_DEFUN([ZFS_AC_RPM], [
RPM=rpm
RPMBUILD=rpmbuild
AC_MSG_CHECKING([whether $RPM is available])
AS_IF([tmp=$($RPM --version 2>/dev/null)], [
RPM_VERSION=$(echo $tmp | $AWK '/RPM/ { print $[3] }')
HAVE_RPM=yes
AC_MSG_RESULT([$HAVE_RPM ($RPM_VERSION)])
],[
HAVE_RPM=no
AC_MSG_RESULT([$HAVE_RPM])
])
AC_MSG_CHECKING([whether $RPMBUILD is available])
AS_IF([tmp=$($RPMBUILD --version 2>/dev/null)], [
RPMBUILD_VERSION=$(echo $tmp | $AWK '/RPM/ { print $[3] }')
HAVE_RPMBUILD=yes
AC_MSG_RESULT([$HAVE_RPMBUILD ($RPMBUILD_VERSION)])
],[
HAVE_RPMBUILD=no
AC_MSG_RESULT([$HAVE_RPMBUILD])
])
RPM_DEFINE_COMMON='--define "$(DEBUG_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(DEBUGINFO_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(DEBUG_KMEM_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(DEBUG_KMEM_TRACKING_ZFS) 1"'
RPM_DEFINE_COMMON=${RPM_DEFINE_COMMON}' --define "$(ASAN_ZFS) 1"'
RPM_DEFINE_UTIL=' --define "_initconfdir $(initconfdir)"'
dnl # Make the next three RPM_DEFINE_UTIL additions conditional, since
dnl # their values may not be set when running:
dnl #
dnl # ./configure --with-config=srpm
dnl #
AS_IF([test -n "$dracutdir" ], [
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_dracutdir $(dracutdir)"'
])
AS_IF([test -n "$udevdir" ], [
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_udevdir $(udevdir)"'
])
AS_IF([test -n "$udevruledir" ], [
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_udevruledir $(udevruledir)"'
])
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_SYSTEMD)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PYZFS)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PAM)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PYTHON_VERSION)'
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' $(DEFINE_PYTHON_PKG_VERSION)'
dnl # Override default lib directory on Debian/Ubuntu systems. The
dnl # provided /usr/lib/rpm/platform/<arch>/macros files do not
dnl # specify the correct path for multiarch systems as described
dnl # by the packaging guidelines.
dnl #
dnl # https://wiki.ubuntu.com/MultiarchSpec
dnl # https://wiki.debian.org/Multiarch/Implementation
dnl #
AS_IF([test "$DEFAULT_PACKAGE" = "deb"], [
MULTIARCH_LIBDIR="lib/$(dpkg-architecture -qDEB_HOST_MULTIARCH)"
RPM_DEFINE_UTIL=${RPM_DEFINE_UTIL}' --define "_lib $(MULTIARCH_LIBDIR)"'
AC_SUBST(MULTIARCH_LIBDIR)
])
dnl # Make RPM_DEFINE_KMOD additions conditional on CONFIG_KERNEL,
dnl # since the values will not be set otherwise. The spec files
dnl # provide defaults for them.
dnl #
RPM_DEFINE_KMOD='--define "_wrong_version_format_terminate_build 0"'
AM_COND_IF([CONFIG_KERNEL], [
RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kernels $(LINUX_VERSION)"'
RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "ksrc $(LINUX)"'
RPM_DEFINE_KMOD=${RPM_DEFINE_KMOD}' --define "kobj $(LINUX_OBJ)"'
])
RPM_DEFINE_DKMS=''
SRPM_DEFINE_COMMON='--define "build_src_rpm 1"'
SRPM_DEFINE_UTIL=
SRPM_DEFINE_KMOD=
SRPM_DEFINE_DKMS=
RPM_SPEC_DIR="rpm/generic"
AC_ARG_WITH([spec],
AS_HELP_STRING([--with-spec=SPEC],
[Spec files 'generic|redhat']),
[RPM_SPEC_DIR="rpm/$withval"])
AC_MSG_CHECKING([whether spec files are available])
AC_MSG_RESULT([yes ($RPM_SPEC_DIR/*.spec.in)])
AC_SUBST(HAVE_RPM)
AC_SUBST(RPM)
AC_SUBST(RPM_VERSION)
AC_SUBST(HAVE_RPMBUILD)
AC_SUBST(RPMBUILD)
AC_SUBST(RPMBUILD_VERSION)
AC_SUBST(RPM_SPEC_DIR)
AC_SUBST(RPM_DEFINE_UTIL)
AC_SUBST(RPM_DEFINE_KMOD)
AC_SUBST(RPM_DEFINE_DKMS)
AC_SUBST(RPM_DEFINE_COMMON)
AC_SUBST(SRPM_DEFINE_UTIL)
AC_SUBST(SRPM_DEFINE_KMOD)
AC_SUBST(SRPM_DEFINE_DKMS)
AC_SUBST(SRPM_DEFINE_COMMON)
])
dnl #
dnl # Check for dpkg+dpkg-buildpackage to build DEB packages. If these
dnl # tools are missing it is non-fatal but you will not be able to build
dnl # DEB packages and will be warned if you try to.
dnl #
AC_DEFUN([ZFS_AC_DPKG], [
DPKG=dpkg
DPKGBUILD=dpkg-buildpackage
AC_MSG_CHECKING([whether $DPKG is available])
AS_IF([tmp=$($DPKG --version 2>/dev/null)], [
DPKG_VERSION=$(echo $tmp | $AWK '/Debian/ { print $[7] }')
HAVE_DPKG=yes
AC_MSG_RESULT([$HAVE_DPKG ($DPKG_VERSION)])
],[
HAVE_DPKG=no
AC_MSG_RESULT([$HAVE_DPKG])
])
AC_MSG_CHECKING([whether $DPKGBUILD is available])
AS_IF([tmp=$($DPKGBUILD --version 2>/dev/null)], [
DPKGBUILD_VERSION=$(echo $tmp | \
$AWK '/Debian/ { print $[4] }' | cut -f-4 -d'.')
HAVE_DPKGBUILD=yes
AC_MSG_RESULT([$HAVE_DPKGBUILD ($DPKGBUILD_VERSION)])
],[
HAVE_DPKGBUILD=no
AC_MSG_RESULT([$HAVE_DPKGBUILD])
])
AC_SUBST(HAVE_DPKG)
AC_SUBST(DPKG)
AC_SUBST(DPKG_VERSION)
AC_SUBST(HAVE_DPKGBUILD)
AC_SUBST(DPKGBUILD)
AC_SUBST(DPKGBUILD_VERSION)
])
dnl #
dnl # Until native packaging for the various different packaging systems
dnl # can be added, the least we can do is attempt to use alien to
dnl # convert the RPM packages to the needed package type. This is
dnl # a hack but so far it has worked reasonably well.
dnl #
AC_DEFUN([ZFS_AC_ALIEN], [
ALIEN=alien
AC_MSG_CHECKING([whether $ALIEN is available])
AS_IF([tmp=$($ALIEN --version 2>/dev/null)], [
ALIEN_VERSION=$(echo $tmp | $AWK '{ print $[3] }')
ALIEN_MAJOR=$(echo ${ALIEN_VERSION} | $AWK -F'.' '{ print $[1] }')
ALIEN_MINOR=$(echo ${ALIEN_VERSION} | $AWK -F'.' '{ print $[2] }')
ALIEN_POINT=$(echo ${ALIEN_VERSION} | $AWK -F'.' '{ print $[3] }')
HAVE_ALIEN=yes
AC_MSG_RESULT([$HAVE_ALIEN ($ALIEN_VERSION)])
],[
HAVE_ALIEN=no
AC_MSG_RESULT([$HAVE_ALIEN])
])
AC_SUBST(HAVE_ALIEN)
AC_SUBST(ALIEN)
AC_SUBST(ALIEN_VERSION)
AC_SUBST(ALIEN_MAJOR)
AC_SUBST(ALIEN_MINOR)
AC_SUBST(ALIEN_POINT)
])
dnl #
dnl # Using the VENDOR tag from config.guess, set the default
dnl # package type for 'make pkg': (rpm | deb | tgz)
dnl #
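dnl # The detection below may be overridden explicitly, for example:
dnl #
dnl #     ./configure --with-vendor=debian
dnl #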
AC_DEFUN([ZFS_AC_DEFAULT_PACKAGE], [
AC_MSG_CHECKING([os distribution])
AC_ARG_WITH([vendor],
[AS_HELP_STRING([--with-vendor],
[Distribution vendor @<:@default=check@:>@])],
[with_vendor=$withval],
[with_vendor=check])
AS_IF([test "x$with_vendor" = "xcheck"],[
if test -f /etc/toss-release ; then
VENDOR=toss ;
elif test -f /etc/fedora-release ; then
VENDOR=fedora ;
elif test -f /etc/redhat-release ; then
VENDOR=redhat ;
elif test -f /etc/gentoo-release ; then
VENDOR=gentoo ;
elif test -f /etc/arch-release ; then
VENDOR=arch ;
elif test -f /etc/SuSE-release ; then
VENDOR=sles ;
elif test -f /etc/slackware-version ; then
VENDOR=slackware ;
elif test -f /etc/lunar.release ; then
VENDOR=lunar ;
elif test -f /etc/lsb-release ; then
VENDOR=ubuntu ;
elif test -f /etc/debian_version ; then
VENDOR=debian ;
elif test -f /etc/alpine-release ; then
VENDOR=alpine ;
elif test -f /bin/freebsd-version ; then
VENDOR=freebsd ;
else
VENDOR= ;
fi],
[ test "x${with_vendor}" != x],[
VENDOR="$with_vendor" ],
[ VENDOR= ; ]
)
AC_MSG_RESULT([$VENDOR])
AC_SUBST(VENDOR)
AC_MSG_CHECKING([default package type])
case "$VENDOR" in
toss) DEFAULT_PACKAGE=rpm ;;
redhat) DEFAULT_PACKAGE=rpm ;;
fedora) DEFAULT_PACKAGE=rpm ;;
gentoo) DEFAULT_PACKAGE=tgz ;;
alpine) DEFAULT_PACKAGE=tgz ;;
arch) DEFAULT_PACKAGE=tgz ;;
sles) DEFAULT_PACKAGE=rpm ;;
slackware) DEFAULT_PACKAGE=tgz ;;
lunar) DEFAULT_PACKAGE=tgz ;;
ubuntu) DEFAULT_PACKAGE=deb ;;
debian) DEFAULT_PACKAGE=deb ;;
freebsd) DEFAULT_PACKAGE=pkg ;;
*) DEFAULT_PACKAGE=rpm ;;
esac
AC_MSG_RESULT([$DEFAULT_PACKAGE])
AC_SUBST(DEFAULT_PACKAGE)
AC_MSG_CHECKING([default init directory])
case "$VENDOR" in
freebsd) initdir=$sysconfdir/rc.d ;;
*) initdir=$sysconfdir/init.d;;
esac
AC_MSG_RESULT([$initdir])
AC_SUBST(initdir)
AC_MSG_CHECKING([default init script type and shell])
case "$VENDOR" in
toss) DEFAULT_INIT_SCRIPT=redhat ;;
redhat) DEFAULT_INIT_SCRIPT=redhat ;;
fedora) DEFAULT_INIT_SCRIPT=fedora ;;
gentoo) DEFAULT_INIT_SCRIPT=openrc ;;
alpine) DEFAULT_INIT_SCRIPT=openrc ;;
arch) DEFAULT_INIT_SCRIPT=lsb ;;
sles) DEFAULT_INIT_SCRIPT=lsb ;;
slackware) DEFAULT_INIT_SCRIPT=lsb ;;
lunar) DEFAULT_INIT_SCRIPT=lunar ;;
ubuntu) DEFAULT_INIT_SCRIPT=lsb ;;
debian) DEFAULT_INIT_SCRIPT=lsb ;;
freebsd) DEFAULT_INIT_SCRIPT=freebsd;;
*) DEFAULT_INIT_SCRIPT=lsb ;;
esac
# On gentoo, it's possible that OpenRC isn't installed. Check if
# /sbin/openrc-run exists, and if not, fall back to generic defaults.
DEFAULT_INIT_SHELL="/bin/sh"
AS_IF([test "$DEFAULT_INIT_SCRIPT" = "openrc"], [
AS_IF([test -x "/sbin/openrc-run"],
[DEFAULT_INIT_SHELL="/sbin/openrc-run"],
[DEFAULT_INIT_SCRIPT=lsb])
])
AC_MSG_RESULT([$DEFAULT_INIT_SCRIPT:$DEFAULT_INIT_SHELL])
AC_SUBST(DEFAULT_INIT_SCRIPT)
AC_SUBST(DEFAULT_INIT_SHELL)
AC_MSG_CHECKING([default nfs server init script])
AS_IF([test "$VENDOR" = "debian"],
[DEFAULT_INIT_NFS_SERVER="nfs-kernel-server"],
[DEFAULT_INIT_NFS_SERVER="nfs"]
)
AC_MSG_RESULT([$DEFAULT_INIT_NFS_SERVER])
AC_SUBST(DEFAULT_INIT_NFS_SERVER)
AC_MSG_CHECKING([default init config directory])
case "$VENDOR" in
alpine) initconfdir=/etc/conf.d ;;
gentoo) initconfdir=/etc/conf.d ;;
toss) initconfdir=/etc/sysconfig ;;
redhat) initconfdir=/etc/sysconfig ;;
fedora) initconfdir=/etc/sysconfig ;;
sles) initconfdir=/etc/sysconfig ;;
ubuntu) initconfdir=/etc/default ;;
debian) initconfdir=/etc/default ;;
freebsd) initconfdir=$sysconfdir/rc.conf.d;;
*) initconfdir=/etc/default ;;
esac
AC_MSG_RESULT([$initconfdir])
AC_SUBST(initconfdir)
AC_MSG_CHECKING([whether initramfs-tools is available])
if test -d /usr/share/initramfs-tools ; then
RPM_DEFINE_INITRAMFS='--define "_initramfs 1"'
AC_MSG_RESULT([yes])
else
RPM_DEFINE_INITRAMFS=''
AC_MSG_RESULT([no])
fi
AC_SUBST(RPM_DEFINE_INITRAMFS)
])
dnl #
dnl # Default ZFS package configuration
dnl #
AC_DEFUN([ZFS_AC_PACKAGE], [
ZFS_AC_DEFAULT_PACKAGE
AS_IF([test x$VENDOR != xfreebsd], [
ZFS_AC_RPM
ZFS_AC_DPKG
ZFS_AC_ALIEN
])
])
diff --git a/sys/contrib/openzfs/config/zfs-meta.m4 b/sys/contrib/openzfs/config/zfs-meta.m4
index b3c1befaac5d..1c9d246124d1 100644
--- a/sys/contrib/openzfs/config/zfs-meta.m4
+++ b/sys/contrib/openzfs/config/zfs-meta.m4
@@ -1,207 +1,207 @@
dnl #
dnl # DESCRIPTION:
dnl # Read metadata from the META file. When building from a git repository
dnl # the ZFS_META_RELEASE field will be overwritten if there is an annotated
dnl # tag matching the form ZFS_META_NAME-ZFS_META_VERSION-*. This allows
dnl # for working builds to be uniquely identified using the git commit hash.
dnl #
dnl # The META file format is as follows:
dnl # ^[ ]*KEY:[ \t]+VALUE$
dnl #
dnl # In other words:
dnl # - KEY is separated from VALUE by a colon and one or more spaces/tabs.
dnl # - KEY and VALUE are case sensitive.
dnl # - Leading spaces are ignored.
dnl # - First match wins for duplicate keys.
dnl #
dnl # A line can be commented out by preceding it with a '#' (or technically
dnl # any non-space character since that will prevent the regex from
dnl # matching).
dnl #
dnl # WARNING:
dnl # Placing a colon followed by a space or tab (ie, ":[ \t]+") within the
dnl # VALUE will prematurely terminate the string since that sequence is
dnl # used as the awk field separator.
dnl #
dnl # KEYS:
dnl # The following META keys are recognized:
dnl # Name, Version, Release, Date, Author, LT_Current, LT_Revision, LT_Age
dnl #
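dnl # An illustrative (not verbatim) META fragment in the format above:
dnl #
dnl #     Name:     zfs
dnl #     Version:  2.1.0
dnl #     Release:  1
dnl #     License:  CDDL
dnl #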
dnl # Written by Chris Dunlap <cdunlap@llnl.gov>.
dnl # Modified by Brian Behlendorf <behlendorf1@llnl.gov>.
dnl #
AC_DEFUN([ZFS_AC_META], [
AH_BOTTOM([
#undef PACKAGE
#undef PACKAGE_BUGREPORT
#undef PACKAGE_NAME
#undef PACKAGE_STRING
#undef PACKAGE_TARNAME
#undef PACKAGE_VERSION
#undef STDC_HEADERS
#undef VERSION])
AC_PROG_AWK
AC_MSG_CHECKING([metadata])
META="$srcdir/META"
_zfs_ac_meta_type="none"
if test -f "$META"; then
_zfs_ac_meta_type="META file"
ZFS_META_NAME=_ZFS_AC_META_GETVAL([(Name|Project|Package)]);
if test -n "$ZFS_META_NAME"; then
AC_DEFINE_UNQUOTED([ZFS_META_NAME], ["$ZFS_META_NAME"],
[Define the project name.]
)
AC_SUBST([ZFS_META_NAME])
fi
ZFS_META_VERSION=_ZFS_AC_META_GETVAL([Version]);
if test -n "$ZFS_META_VERSION"; then
AC_DEFINE_UNQUOTED([ZFS_META_VERSION],
["$ZFS_META_VERSION"],
[Define the project version.])
AC_DEFINE_UNQUOTED([SPL_META_VERSION],
[ZFS_META_VERSION],
[Defined for legacy compatibility.])
AC_SUBST([ZFS_META_VERSION])
fi
ZFS_META_RELEASE=_ZFS_AC_META_GETVAL([Release]);
if test ! -f ".nogitrelease" && git rev-parse --git-dir > /dev/null 2>&1; then
_match="${ZFS_META_NAME}-${ZFS_META_VERSION}"
_alias=$(git describe --match=${_match} 2>/dev/null)
- _release=$(echo ${_alias}|cut -f3- -d'-'|sed 's/-/_/g')
+ _release=$(echo ${_alias}|sed "s/${ZFS_META_NAME}//"|cut -f3- -d'-'|sed 's/-/_/g')
if test -n "${_release}"; then
ZFS_META_RELEASE=${_release}
_zfs_ac_meta_type="git describe"
else
_match="${ZFS_META_NAME}-${ZFS_META_VERSION}-${ZFS_META_RELEASE}"
_alias=$(git describe --match=${_match} 2>/dev/null)
- _release=$(echo ${_alias}|cut -f3- -d'-'|sed 's/-/_/g')
+ _release=$(echo ${_alias}|sed 's/${ZFS_META_NAME}//'|cut -f3- -d'-'|sed 's/-/_/g')
if test -n "${_release}"; then
ZFS_META_RELEASE=${_release}
_zfs_ac_meta_type="git describe"
fi
fi
fi
if test -n "$ZFS_META_RELEASE"; then
AC_DEFINE_UNQUOTED([ZFS_META_RELEASE],
["$ZFS_META_RELEASE"],
[Define the project release.])
AC_DEFINE_UNQUOTED([SPL_META_RELEASE],
[ZFS_META_RELEASE],
[Defined for legacy compatibility.])
AC_SUBST([ZFS_META_RELEASE])
RELEASE="$ZFS_META_RELEASE"
AC_SUBST([RELEASE])
fi
ZFS_META_LICENSE=_ZFS_AC_META_GETVAL([License]);
if test -n "$ZFS_META_LICENSE"; then
AC_DEFINE_UNQUOTED([ZFS_META_LICENSE], ["$ZFS_META_LICENSE"],
[Define the project license.]
)
AC_SUBST([ZFS_META_LICENSE])
fi
if test -n "$ZFS_META_NAME" -a -n "$ZFS_META_VERSION"; then
ZFS_META_ALIAS="$ZFS_META_NAME-$ZFS_META_VERSION"
test -n "$ZFS_META_RELEASE" &&
ZFS_META_ALIAS="$ZFS_META_ALIAS-$ZFS_META_RELEASE"
AC_DEFINE_UNQUOTED([ZFS_META_ALIAS],
["$ZFS_META_ALIAS"],
[Define the project alias string.])
AC_DEFINE_UNQUOTED([SPL_META_ALIAS],
[ZFS_META_ALIAS],
[Defined for legacy compatibility.])
AC_SUBST([ZFS_META_ALIAS])
fi
ZFS_META_DATA=_ZFS_AC_META_GETVAL([Date]);
if test -n "$ZFS_META_DATA"; then
AC_DEFINE_UNQUOTED([ZFS_META_DATA], ["$ZFS_META_DATA"],
[Define the project release date.]
)
AC_SUBST([ZFS_META_DATA])
fi
ZFS_META_AUTHOR=_ZFS_AC_META_GETVAL([Author]);
if test -n "$ZFS_META_AUTHOR"; then
AC_DEFINE_UNQUOTED([ZFS_META_AUTHOR], ["$ZFS_META_AUTHOR"],
[Define the project author.]
)
AC_SUBST([ZFS_META_AUTHOR])
fi
ZFS_META_KVER_MIN=_ZFS_AC_META_GETVAL([Linux-Minimum]);
if test -n "$ZFS_META_KVER_MIN"; then
AC_DEFINE_UNQUOTED([ZFS_META_KVER_MIN],
["$ZFS_META_KVER_MIN"],
[Define the minimum compatible kernel version.]
)
AC_SUBST([ZFS_META_KVER_MIN])
fi
ZFS_META_KVER_MAX=_ZFS_AC_META_GETVAL([Linux-Maximum]);
if test -n "$ZFS_META_KVER_MAX"; then
AC_DEFINE_UNQUOTED([ZFS_META_KVER_MAX],
["$ZFS_META_KVER_MAX"],
[Define the maximum compatible kernel version.]
)
AC_SUBST([ZFS_META_KVER_MAX])
fi
m4_pattern_allow([^LT_(CURRENT|REVISION|AGE)$])
ZFS_META_LT_CURRENT=_ZFS_AC_META_GETVAL([LT_Current]);
ZFS_META_LT_REVISION=_ZFS_AC_META_GETVAL([LT_Revision]);
ZFS_META_LT_AGE=_ZFS_AC_META_GETVAL([LT_Age]);
if test -n "$ZFS_META_LT_CURRENT" \
-o -n "$ZFS_META_LT_REVISION" \
-o -n "$ZFS_META_LT_AGE"; then
test -n "$ZFS_META_LT_CURRENT" || ZFS_META_LT_CURRENT="0"
test -n "$ZFS_META_LT_REVISION" || ZFS_META_LT_REVISION="0"
test -n "$ZFS_META_LT_AGE" || ZFS_META_LT_AGE="0"
AC_DEFINE_UNQUOTED([ZFS_META_LT_CURRENT],
["$ZFS_META_LT_CURRENT"],
[Define the libtool library 'current'
version information.]
)
AC_DEFINE_UNQUOTED([ZFS_META_LT_REVISION],
["$ZFS_META_LT_REVISION"],
[Define the libtool library 'revision'
version information.]
)
AC_DEFINE_UNQUOTED([ZFS_META_LT_AGE], ["$ZFS_META_LT_AGE"],
[Define the libtool library 'age'
version information.]
)
AC_SUBST([ZFS_META_LT_CURRENT])
AC_SUBST([ZFS_META_LT_REVISION])
AC_SUBST([ZFS_META_LT_AGE])
fi
fi
AC_MSG_RESULT([$_zfs_ac_meta_type])
]
)
dnl # _ZFS_AC_META_GETVAL (KEY_NAME_OR_REGEX)
dnl #
dnl # Returns the META VALUE associated with the given KEY_NAME_OR_REGEX expr.
dnl #
dnl # Despite their resemblance to line noise,
dnl # the "@<:@" and "@:>@" constructs are quadrigraphs for "[" and "]".
dnl # <www.gnu.org/software/autoconf/manual/autoconf.html#Quadrigraphs>
dnl #
dnl # The "$[]1" and "$[]2" constructs prevent M4 parameter expansion
dnl # so a literal $1 and $2 will be passed to the resulting awk script,
dnl # whereas the "$1" will undergo M4 parameter expansion for the META key.
dnl #
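dnl # For example, _ZFS_AC_META_GETVAL([Version]) expands (after quadrigraph
dnl # and parameter substitution) to roughly:
dnl #
dnl #     `$AWK -F ':[ \t]+' '$1 ~ /^ *Version$/ { print $2; exit }' $META`
dnl #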
AC_DEFUN([_ZFS_AC_META_GETVAL],
[`$AWK -F ':@<:@ \t@:>@+' '$[]1 ~ /^ *$1$/ { print $[]2; exit }' $META`]dnl
)
diff --git a/sys/contrib/openzfs/include/libzfs.h b/sys/contrib/openzfs/include/libzfs.h
index 9ef280636d4c..270f810027bb 100644
--- a/sys/contrib/openzfs/include/libzfs.h
+++ b/sys/contrib/openzfs/include/libzfs.h
@@ -1,967 +1,969 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright Joyent, Inc.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2016, Intel Corporation.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
#ifndef _LIBZFS_H
#define _LIBZFS_H extern __attribute__((visibility("default")))
#include <assert.h>
#include <libnvpair.h>
#include <sys/mnttab.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/fs/zfs.h>
#include <sys/avl.h>
#include <ucred.h>
#include <libzfs_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Miscellaneous ZFS constants
*/
#define ZFS_MAXPROPLEN MAXPATHLEN
#define ZPOOL_MAXPROPLEN MAXPATHLEN
/*
* libzfs errors
*/
typedef enum zfs_error {
EZFS_SUCCESS = 0, /* no error -- success */
EZFS_NOMEM = 2000, /* out of memory */
EZFS_BADPROP, /* invalid property value */
EZFS_PROPREADONLY, /* cannot set readonly property */
EZFS_PROPTYPE, /* property does not apply to dataset type */
EZFS_PROPNONINHERIT, /* property is not inheritable */
EZFS_PROPSPACE, /* bad quota or reservation */
EZFS_BADTYPE, /* dataset is not of appropriate type */
EZFS_BUSY, /* pool or dataset is busy */
EZFS_EXISTS, /* pool or dataset already exists */
EZFS_NOENT, /* no such pool or dataset */
EZFS_BADSTREAM, /* bad backup stream */
EZFS_DSREADONLY, /* dataset is readonly */
EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */
EZFS_INVALIDNAME, /* invalid dataset name */
EZFS_BADRESTORE, /* unable to restore to destination */
EZFS_BADBACKUP, /* backup failed */
EZFS_BADTARGET, /* bad attach/detach/replace target */
EZFS_NODEVICE, /* no such device in pool */
EZFS_BADDEV, /* invalid device to add */
EZFS_NOREPLICAS, /* no valid replicas */
EZFS_RESILVERING, /* resilvering (healing reconstruction) */
EZFS_BADVERSION, /* unsupported version */
EZFS_POOLUNAVAIL, /* pool is currently unavailable */
EZFS_DEVOVERFLOW, /* too many devices in one vdev */
EZFS_BADPATH, /* must be an absolute path */
EZFS_CROSSTARGET, /* rename or clone across pool or dataset */
EZFS_ZONED, /* used improperly in local zone */
EZFS_MOUNTFAILED, /* failed to mount dataset */
EZFS_UMOUNTFAILED, /* failed to unmount dataset */
EZFS_UNSHARENFSFAILED, /* failed to unshare over nfs */
EZFS_SHARENFSFAILED, /* failed to share over nfs */
EZFS_PERM, /* permission denied */
EZFS_NOSPC, /* out of space */
EZFS_FAULT, /* bad address */
EZFS_IO, /* I/O error */
EZFS_INTR, /* signal received */
EZFS_ISSPARE, /* device is a hot spare */
EZFS_INVALCONFIG, /* invalid vdev configuration */
EZFS_RECURSIVE, /* recursive dependency */
EZFS_NOHISTORY, /* no history object */
EZFS_POOLPROPS, /* couldn't retrieve pool props */
EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */
EZFS_POOL_INVALARG, /* invalid argument for this pool operation */
EZFS_NAMETOOLONG, /* dataset name is too long */
EZFS_OPENFAILED, /* open of device failed */
EZFS_NOCAP, /* couldn't get capacity */
EZFS_LABELFAILED, /* write of label failed */
EZFS_BADWHO, /* invalid permission who */
EZFS_BADPERM, /* invalid permission */
EZFS_BADPERMSET, /* invalid permission set name */
EZFS_NODELEGATION, /* delegated administration is disabled */
EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */
EZFS_SHARESMBFAILED, /* failed to share over smb */
EZFS_BADCACHE, /* bad cache file */
EZFS_ISL2CACHE, /* device is for the level 2 ARC */
EZFS_VDEVNOTSUP, /* unsupported vdev type */
EZFS_NOTSUP, /* ops not supported on this dataset */
EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */
EZFS_UNPLAYED_LOGS, /* log device has unplayed logs */
EZFS_REFTAG_RELE, /* snapshot release: tag not found */
EZFS_REFTAG_HOLD, /* snapshot hold: tag already exists */
EZFS_TAGTOOLONG, /* snapshot hold/rele: tag too long */
EZFS_PIPEFAILED, /* pipe create failed */
EZFS_THREADCREATEFAILED, /* thread create failed */
EZFS_POSTSPLIT_ONLINE, /* onlining a disk after splitting it */
EZFS_SCRUBBING, /* currently scrubbing */
EZFS_NO_SCRUB, /* no active scrub */
EZFS_DIFF, /* general failure of zfs diff */
EZFS_DIFFDATA, /* bad zfs diff data */
EZFS_POOLREADONLY, /* pool is in read-only mode */
EZFS_SCRUB_PAUSED, /* scrub currently paused */
EZFS_ACTIVE_POOL, /* pool is imported on a different system */
EZFS_CRYPTOFAILED, /* failed to setup encryption */
EZFS_NO_PENDING, /* cannot cancel, no operation is pending */
EZFS_CHECKPOINT_EXISTS, /* checkpoint exists */
EZFS_DISCARDING_CHECKPOINT, /* currently discarding a checkpoint */
EZFS_NO_CHECKPOINT, /* pool has no checkpoint */
EZFS_DEVRM_IN_PROGRESS, /* a device is currently being removed */
EZFS_VDEV_TOO_BIG, /* a device is too big to be used */
EZFS_IOC_NOTSUPPORTED, /* operation not supported by zfs module */
EZFS_TOOMANY, /* argument list too long */
EZFS_INITIALIZING, /* currently initializing */
EZFS_NO_INITIALIZE, /* no active initialize */
EZFS_WRONG_PARENT, /* invalid parent dataset (e.g. ZVOL) */
EZFS_TRIMMING, /* currently trimming */
EZFS_NO_TRIM, /* no active trim */
EZFS_TRIM_NOTSUP, /* device does not support trim */
EZFS_NO_RESILVER_DEFER, /* pool doesn't support resilver_defer */
EZFS_EXPORT_IN_PROGRESS, /* currently exporting the pool */
EZFS_REBUILDING, /* resilvering (sequential reconstruction) */
EZFS_UNKNOWN
} zfs_error_t;
/*
* The following data structures are all part
* of the zfs_allow_t data structure which is
* used for printing 'allow' permissions.
* It is a linked list of zfs_allow_t's which
* then contain AVL trees for user/group/sets/...,
* and each one of the entries in those trees has
* AVL trees for the permissions they belong to and
* whether they are local, descendent, or local+descendent
* permissions. The AVL trees are used primarily for
* sorting purposes, but also so that we can quickly find
* a given user and/or permission.
*/
typedef struct zfs_perm_node {
avl_node_t z_node;
char z_pname[MAXPATHLEN];
} zfs_perm_node_t;
typedef struct zfs_allow_node {
avl_node_t z_node;
char z_key[MAXPATHLEN]; /* name, such as joe */
avl_tree_t z_localdescend; /* local+descendent perms */
avl_tree_t z_local; /* local permissions */
avl_tree_t z_descend; /* descendent permissions */
} zfs_allow_node_t;
typedef struct zfs_allow {
struct zfs_allow *z_next;
char z_setpoint[MAXPATHLEN];
avl_tree_t z_sets;
avl_tree_t z_crperms;
avl_tree_t z_user;
avl_tree_t z_group;
avl_tree_t z_everyone;
} zfs_allow_t;
/*
* Basic handle types
*/
typedef struct zfs_handle zfs_handle_t;
typedef struct zpool_handle zpool_handle_t;
typedef struct libzfs_handle libzfs_handle_t;
_LIBZFS_H int zpool_wait(zpool_handle_t *, zpool_wait_activity_t);
_LIBZFS_H int zpool_wait_status(zpool_handle_t *, zpool_wait_activity_t,
boolean_t *, boolean_t *);
/*
* Library initialization
*/
_LIBZFS_H libzfs_handle_t *libzfs_init(void);
_LIBZFS_H void libzfs_fini(libzfs_handle_t *);
_LIBZFS_H libzfs_handle_t *zpool_get_handle(zpool_handle_t *);
_LIBZFS_H libzfs_handle_t *zfs_get_handle(zfs_handle_t *);
_LIBZFS_H void libzfs_print_on_error(libzfs_handle_t *, boolean_t);
_LIBZFS_H void zfs_save_arguments(int argc, char **, char *, int);
_LIBZFS_H int zpool_log_history(libzfs_handle_t *, const char *);
_LIBZFS_H int libzfs_errno(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_init(int);
_LIBZFS_H const char *libzfs_error_action(libzfs_handle_t *);
_LIBZFS_H const char *libzfs_error_description(libzfs_handle_t *);
_LIBZFS_H int zfs_standard_error(libzfs_handle_t *, int, const char *);
_LIBZFS_H void libzfs_mnttab_init(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_fini(libzfs_handle_t *);
_LIBZFS_H void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t);
_LIBZFS_H int libzfs_mnttab_find(libzfs_handle_t *, const char *,
struct mnttab *);
_LIBZFS_H void libzfs_mnttab_add(libzfs_handle_t *, const char *,
const char *, const char *);
_LIBZFS_H void libzfs_mnttab_remove(libzfs_handle_t *, const char *);
/*
* Basic handle functions
*/
_LIBZFS_H zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
_LIBZFS_H zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
_LIBZFS_H void zpool_close(zpool_handle_t *);
_LIBZFS_H const char *zpool_get_name(zpool_handle_t *);
_LIBZFS_H int zpool_get_state(zpool_handle_t *);
_LIBZFS_H const char *zpool_state_to_name(vdev_state_t, vdev_aux_t);
_LIBZFS_H const char *zpool_pool_state_to_name(pool_state_t);
_LIBZFS_H void zpool_free_handles(libzfs_handle_t *);
/*
* Iterate over all active pools in the system.
*/
typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
_LIBZFS_H int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
_LIBZFS_H boolean_t zpool_skip_pool(const char *);
/*
* Functions to create and destroy pools
*/
_LIBZFS_H int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
nvlist_t *, nvlist_t *);
_LIBZFS_H int zpool_destroy(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_add(zpool_handle_t *, nvlist_t *);
typedef struct splitflags {
/* do not split, but return the config that would be split off */
int dryrun : 1;
/* after splitting, import the pool */
int import : 1;
int name_flags;
} splitflags_t;
typedef struct trimflags {
/* requested vdevs are for the entire pool */
boolean_t fullpool;
/* request a secure trim, requires support from device */
boolean_t secure;
/* after starting trim, block until trim completes */
boolean_t wait;
/* trim at the requested rate in bytes/second */
uint64_t rate;
} trimflags_t;
/*
* Functions to manipulate pool and vdev state
*/
_LIBZFS_H int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t);
_LIBZFS_H int zpool_initialize(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_initialize_wait(zpool_handle_t *, pool_initialize_func_t,
nvlist_t *);
_LIBZFS_H int zpool_trim(zpool_handle_t *, pool_trim_func_t, nvlist_t *,
trimflags_t *);
_LIBZFS_H int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zpool_reguid(zpool_handle_t *);
_LIBZFS_H int zpool_reopen_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_sync_one(zpool_handle_t *, void *);
_LIBZFS_H int zpool_vdev_online(zpool_handle_t *, const char *, int,
vdev_state_t *);
_LIBZFS_H int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
_LIBZFS_H int zpool_vdev_attach(zpool_handle_t *, const char *,
const char *, nvlist_t *, int, boolean_t);
_LIBZFS_H int zpool_vdev_detach(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_vdev_remove_cancel(zpool_handle_t *);
_LIBZFS_H int zpool_vdev_indirect_size(zpool_handle_t *, const char *,
uint64_t *);
_LIBZFS_H int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **,
nvlist_t *, splitflags_t);
_LIBZFS_H int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t);
_LIBZFS_H int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t);
_LIBZFS_H int zpool_vdev_clear(zpool_handle_t *, uint64_t);
_LIBZFS_H nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
boolean_t *, boolean_t *);
_LIBZFS_H nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
_LIBZFS_H int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *,
const char *);
_LIBZFS_H uint64_t zpool_vdev_path_to_guid(zpool_handle_t *zhp,
const char *path);
_LIBZFS_H const char *zpool_get_state_str(zpool_handle_t *);
/*
* Functions to manage pool properties
*/
_LIBZFS_H int zpool_set_prop(zpool_handle_t *, const char *, const char *);
_LIBZFS_H int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
size_t proplen, zprop_source_t *, boolean_t literal);
_LIBZFS_H uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
zprop_source_t *);
_LIBZFS_H int zpool_props_refresh(zpool_handle_t *);
_LIBZFS_H const char *zpool_prop_to_name(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_values(zpool_prop_t);
/*
* Pool health statistics.
*/
typedef enum {
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
* This must be kept in sync with the zfs_msgid_table in
* lib/libzfs/libzfs_status.c.
*/
ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */
ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */
ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */
ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */
ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */
ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */
ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */
ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */
ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */
ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */
ZPOOL_STATUS_HOSTID_ACTIVE, /* currently active on another system */
ZPOOL_STATUS_HOSTID_REQUIRED, /* multihost=on and hostid=0 */
ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */
ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
ZPOOL_STATUS_IO_FAILURE_MMP, /* failed MMP, failmode not 'panic' */
ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */
ZPOOL_STATUS_ERRATA, /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
* read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
ZPOOL_STATUS_UNSUP_FEAT_READ, /* unsupported features for read */
ZPOOL_STATUS_UNSUP_FEAT_WRITE, /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */
ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */
/*
* The following are not faults per se, but still an error possibly
* requiring administrative attention. There is no corresponding
* message ID.
*/
ZPOOL_STATUS_VERSION_OLDER, /* older legacy on-disk version */
ZPOOL_STATUS_FEAT_DISABLED, /* supported features are disabled */
ZPOOL_STATUS_RESILVERING, /* device being resilvered */
ZPOOL_STATUS_OFFLINE_DEV, /* device offline */
ZPOOL_STATUS_REMOVED_DEV, /* removed device */
ZPOOL_STATUS_REBUILDING, /* device being rebuilt */
ZPOOL_STATUS_REBUILD_SCRUB, /* recommend scrubbing the pool */
ZPOOL_STATUS_NON_NATIVE_ASHIFT, /* (e.g. 512e dev with ashift of 9) */
ZPOOL_STATUS_COMPATIBILITY_ERR, /* bad 'compatibility' property */
ZPOOL_STATUS_INCOMPATIBLE_FEAT, /* feature set outside compatibility */
/*
* Finally, the following indicates a healthy pool.
*/
ZPOOL_STATUS_OK
} zpool_status_t;
_LIBZFS_H zpool_status_t zpool_get_status(zpool_handle_t *, char **,
zpool_errata_t *);
_LIBZFS_H zpool_status_t zpool_import_status(nvlist_t *, char **,
zpool_errata_t *);
/*
* Statistics and configuration functions.
*/
_LIBZFS_H nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
_LIBZFS_H nvlist_t *zpool_get_features(zpool_handle_t *);
_LIBZFS_H int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
_LIBZFS_H int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
/*
* Import and export functions
*/
_LIBZFS_H int zpool_export(zpool_handle_t *, boolean_t, const char *);
_LIBZFS_H int zpool_export_force(zpool_handle_t *, const char *);
_LIBZFS_H int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
char *altroot);
_LIBZFS_H int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
nvlist_t *, int);
_LIBZFS_H void zpool_print_unsup_feat(nvlist_t *config);
/*
* Miscellaneous pool functions
*/
struct zfs_cmd;
_LIBZFS_H const char *zfs_history_event_names[];
typedef enum {
VDEV_NAME_PATH = 1 << 0,
VDEV_NAME_GUID = 1 << 1,
VDEV_NAME_FOLLOW_LINKS = 1 << 2,
VDEV_NAME_TYPE_ID = 1 << 3,
} vdev_name_t;
_LIBZFS_H char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *,
int name_flags);
_LIBZFS_H int zpool_upgrade(zpool_handle_t *, uint64_t);
_LIBZFS_H int zpool_get_history(zpool_handle_t *, nvlist_t **, uint64_t *,
boolean_t *);
_LIBZFS_H int zpool_events_next(libzfs_handle_t *, nvlist_t **, int *, unsigned,
int);
_LIBZFS_H int zpool_events_clear(libzfs_handle_t *, int *);
_LIBZFS_H int zpool_events_seek(libzfs_handle_t *, uint64_t, int);
_LIBZFS_H void zpool_obj_to_path_ds(zpool_handle_t *, uint64_t, uint64_t,
char *, size_t);
_LIBZFS_H void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
size_t);
_LIBZFS_H int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
_LIBZFS_H int zpool_get_physpath(zpool_handle_t *, char *, size_t);
_LIBZFS_H void zpool_explain_recover(libzfs_handle_t *, const char *, int,
nvlist_t *);
_LIBZFS_H int zpool_checkpoint(zpool_handle_t *);
_LIBZFS_H int zpool_discard_checkpoint(zpool_handle_t *);
_LIBZFS_H boolean_t zpool_is_draid_spare(const char *);
/*
* Basic handle manipulations. These functions do not create or destroy the
* underlying datasets, only the references to them.
*/
_LIBZFS_H zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
_LIBZFS_H zfs_handle_t *zfs_handle_dup(zfs_handle_t *);
_LIBZFS_H void zfs_close(zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_type(const zfs_handle_t *);
_LIBZFS_H zfs_type_t zfs_get_underlying_type(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_name(const zfs_handle_t *);
_LIBZFS_H zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *);
_LIBZFS_H const char *zfs_get_pool_name(const zfs_handle_t *);
/*
* Property management functions. Some functions are shared with the kernel,
* and are found in sys/fs/zfs.h.
*/
/*
* zfs dataset property management
*/
_LIBZFS_H const char *zfs_prop_default_string(zfs_prop_t);
_LIBZFS_H uint64_t zfs_prop_default_numeric(zfs_prop_t);
_LIBZFS_H const char *zfs_prop_column_name(zfs_prop_t);
_LIBZFS_H boolean_t zfs_prop_align_right(zfs_prop_t);
_LIBZFS_H nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t,
nvlist_t *, uint64_t, zfs_handle_t *, zpool_handle_t *, boolean_t,
const char *);
_LIBZFS_H const char *zfs_prop_to_name(zfs_prop_t);
_LIBZFS_H int zfs_prop_set(zfs_handle_t *, const char *, const char *);
_LIBZFS_H int zfs_prop_set_list(zfs_handle_t *, nvlist_t *);
_LIBZFS_H int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
zprop_source_t *, char *, size_t, boolean_t);
_LIBZFS_H int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t,
boolean_t);
_LIBZFS_H int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
zprop_source_t *, char *, size_t);
_LIBZFS_H int zfs_prop_get_userquota_int(zfs_handle_t *zhp,
const char *propname, uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue);
_LIBZFS_H int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
_LIBZFS_H int zfs_prop_get_feature(zfs_handle_t *zhp, const char *propname,
char *buf, size_t len);
_LIBZFS_H uint64_t getprop_uint64(zfs_handle_t *, zfs_prop_t, char **);
_LIBZFS_H uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
_LIBZFS_H int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t);
_LIBZFS_H const char *zfs_prop_values(zfs_prop_t);
_LIBZFS_H int zfs_prop_is_string(zfs_prop_t prop);
_LIBZFS_H nvlist_t *zfs_get_all_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_user_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_recvd_props(zfs_handle_t *);
_LIBZFS_H nvlist_t *zfs_get_clones_nvl(zfs_handle_t *);
_LIBZFS_H int zfs_wait_status(zfs_handle_t *, zfs_wait_activity_t,
boolean_t *, boolean_t *);
/*
* zfs encryption management
*/
_LIBZFS_H int zfs_crypto_get_encryption_root(zfs_handle_t *, boolean_t *,
char *);
_LIBZFS_H int zfs_crypto_create(libzfs_handle_t *, char *, nvlist_t *,
nvlist_t *, boolean_t stdin_available, uint8_t **, uint_t *);
_LIBZFS_H int zfs_crypto_clone_check(libzfs_handle_t *, zfs_handle_t *, char *,
nvlist_t *);
_LIBZFS_H int zfs_crypto_attempt_load_keys(libzfs_handle_t *, char *);
_LIBZFS_H int zfs_crypto_load_key(zfs_handle_t *, boolean_t, char *);
_LIBZFS_H int zfs_crypto_unload_key(zfs_handle_t *);
_LIBZFS_H int zfs_crypto_rewrap(zfs_handle_t *, nvlist_t *, boolean_t);
typedef struct zprop_list {
int pl_prop;
char *pl_user_prop;
struct zprop_list *pl_next;
boolean_t pl_all;
size_t pl_width;
size_t pl_recvd_width;
boolean_t pl_fixed;
} zprop_list_t;
_LIBZFS_H int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t,
boolean_t);
_LIBZFS_H void zfs_prune_proplist(zfs_handle_t *, uint8_t *);
#define ZFS_MOUNTPOINT_NONE "none"
#define ZFS_MOUNTPOINT_LEGACY "legacy"
#define ZFS_FEATURE_DISABLED "disabled"
#define ZFS_FEATURE_ENABLED "enabled"
#define ZFS_FEATURE_ACTIVE "active"
#define ZFS_UNSUPPORTED_INACTIVE "inactive"
#define ZFS_UNSUPPORTED_READONLY "readonly"
/*
* zpool property management
*/
_LIBZFS_H int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **,
boolean_t);
_LIBZFS_H int zpool_prop_get_feature(zpool_handle_t *, const char *, char *,
size_t);
_LIBZFS_H const char *zpool_prop_default_string(zpool_prop_t);
_LIBZFS_H uint64_t zpool_prop_default_numeric(zpool_prop_t);
_LIBZFS_H const char *zpool_prop_column_name(zpool_prop_t);
_LIBZFS_H boolean_t zpool_prop_align_right(zpool_prop_t);
/*
* Functions shared by zfs and zpool property management.
*/
_LIBZFS_H int zprop_iter(zprop_func func, void *cb, boolean_t show_all,
boolean_t ordered, zfs_type_t type);
_LIBZFS_H int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
zfs_type_t);
_LIBZFS_H void zprop_free_list(zprop_list_t *);
#define ZFS_GET_NCOLS 5
typedef enum {
GET_COL_NONE,
GET_COL_NAME,
GET_COL_PROPERTY,
GET_COL_VALUE,
GET_COL_RECVD,
GET_COL_SOURCE
} zfs_get_column_t;
/*
* Functions for printing zfs or zpool properties
*/
typedef struct zprop_get_cbdata {
int cb_sources;
zfs_get_column_t cb_columns[ZFS_GET_NCOLS];
int cb_colwidths[ZFS_GET_NCOLS + 1];
boolean_t cb_scripted;
boolean_t cb_literal;
boolean_t cb_first;
zprop_list_t *cb_proplist;
zfs_type_t cb_type;
} zprop_get_cbdata_t;
_LIBZFS_H void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
const char *, const char *, zprop_source_t, const char *,
const char *);
/*
* Iterator functions.
*/
typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *,
uint64_t, uint64_t);
_LIBZFS_H int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f,
void *);
_LIBZFS_H int zfs_iter_bookmarks(zfs_handle_t *, zfs_iter_f, void *);
_LIBZFS_H int zfs_iter_mounted(zfs_handle_t *, zfs_iter_f, void *);
typedef struct get_all_cb {
zfs_handle_t **cb_handles;
size_t cb_alloc;
size_t cb_used;
} get_all_cb_t;
_LIBZFS_H void zfs_foreach_mountpoint(libzfs_handle_t *, zfs_handle_t **,
size_t, zfs_iter_f, void *, boolean_t);
_LIBZFS_H void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *);
/*
* Functions to create and destroy datasets.
*/
_LIBZFS_H int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
nvlist_t *);
_LIBZFS_H int zfs_create_ancestors(libzfs_handle_t *, const char *);
_LIBZFS_H int zfs_destroy(zfs_handle_t *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t);
_LIBZFS_H int zfs_destroy_snaps_nvl(libzfs_handle_t *, nvlist_t *, boolean_t);
_LIBZFS_H int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
_LIBZFS_H int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t,
nvlist_t *);
_LIBZFS_H int zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps,
nvlist_t *props);
_LIBZFS_H int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t);
typedef struct renameflags {
/* recursive rename */
int recursive : 1;
/* don't unmount file systems */
int nounmount : 1;
/* force unmount file systems */
int forceunmount : 1;
} renameflags_t;
_LIBZFS_H int zfs_rename(zfs_handle_t *, const char *, renameflags_t);
typedef struct sendflags {
/* Amount of extra information to print. */
int verbosity;
/* recursive send (ie, -R) */
boolean_t replicate;
/* for recursive send, skip sending missing snapshots */
boolean_t skipmissing;
/* for incrementals, do all intermediate snapshots */
boolean_t doall;
/* if dataset is a clone, do incremental from its origin */
boolean_t fromorigin;
/* field no longer used, maintained for backwards compatibility */
boolean_t pad;
/* send properties (ie, -p) */
boolean_t props;
/* do not send (no-op, ie. -n) */
boolean_t dryrun;
/* parsable verbose output (ie. -P) */
boolean_t parsable;
/* show progress (ie. -v) */
boolean_t progress;
/* large blocks (>128K) are permitted */
boolean_t largeblock;
/* WRITE_EMBEDDED records of type DATA are permitted */
boolean_t embed_data;
/* compressed WRITE records are permitted */
boolean_t compress;
/* raw encrypted records are permitted */
boolean_t raw;
/* only send received properties (ie. -b) */
boolean_t backup;
/* include snapshot holds in send stream */
boolean_t holds;
/* stream represents a partially received dataset */
boolean_t saved;
} sendflags_t;
typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *);
_LIBZFS_H int zfs_send(zfs_handle_t *, const char *, const char *,
sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **);
_LIBZFS_H int zfs_send_one(zfs_handle_t *, const char *, int, sendflags_t *,
const char *);
_LIBZFS_H int zfs_send_progress(zfs_handle_t *, int, uint64_t *, uint64_t *);
_LIBZFS_H int zfs_send_resume(libzfs_handle_t *, sendflags_t *, int outfd,
const char *);
_LIBZFS_H int zfs_send_saved(zfs_handle_t *, sendflags_t *, int, const char *);
_LIBZFS_H nvlist_t *zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl,
const char *token);
_LIBZFS_H int zfs_promote(zfs_handle_t *);
_LIBZFS_H int zfs_hold(zfs_handle_t *, const char *, const char *,
boolean_t, int);
_LIBZFS_H int zfs_hold_nvl(zfs_handle_t *, int, nvlist_t *);
_LIBZFS_H int zfs_release(zfs_handle_t *, const char *, const char *,
boolean_t);
_LIBZFS_H int zfs_get_holds(zfs_handle_t *, nvlist_t **);
_LIBZFS_H uint64_t zvol_volsize_to_reservation(zpool_handle_t *, uint64_t,
nvlist_t *);
typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain,
uid_t rid, uint64_t space);
_LIBZFS_H int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t,
zfs_userspace_cb_t, void *);
_LIBZFS_H int zfs_get_fsacl(zfs_handle_t *, nvlist_t **);
_LIBZFS_H int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *);
typedef struct recvflags {
/* print informational messages (ie, -v was specified) */
boolean_t verbose;
/* the destination is a prefix, not the exact fs (ie, -d) */
boolean_t isprefix;
/*
* Only the tail of the sent snapshot path is appended to the
* destination to determine the received snapshot name (ie, -e).
*/
boolean_t istail;
/* do not actually do the recv, just check if it would work (ie, -n) */
boolean_t dryrun;
/* rollback/destroy filesystems as necessary (eg, -F) */
boolean_t force;
/* set "canmount=off" on all modified filesystems */
boolean_t canmountoff;
/*
* Mark the file systems as "resumable" and do not destroy them if the
* receive is interrupted
*/
boolean_t resumable;
/* byteswap flag is used internally; callers need not specify */
boolean_t byteswap;
/* do not mount file systems as they are extracted (private) */
boolean_t nomount;
/* Was holds flag set in the compound header? */
boolean_t holds;
/* skip receive of snapshot holds */
boolean_t skipholds;
/* mount the filesystem unless nomount is specified */
boolean_t domount;
/* force unmount while recv snapshot (private) */
boolean_t forceunmount;
} recvflags_t;
_LIBZFS_H int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *,
recvflags_t *, int, avl_tree_t *);
typedef enum diff_flags {
ZFS_DIFF_PARSEABLE = 0x1,
ZFS_DIFF_TIMESTAMP = 0x2,
ZFS_DIFF_CLASSIFY = 0x4
} diff_flags_t;
_LIBZFS_H int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *,
int);
/*
* Miscellaneous functions.
*/
_LIBZFS_H const char *zfs_type_to_name(zfs_type_t);
_LIBZFS_H void zfs_refresh_properties(zfs_handle_t *);
_LIBZFS_H int zfs_name_valid(const char *, zfs_type_t);
_LIBZFS_H zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_parent_name(zfs_handle_t *, char *, size_t);
_LIBZFS_H boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *,
zfs_type_t);
_LIBZFS_H int zfs_spa_version(zfs_handle_t *, int *);
_LIBZFS_H boolean_t zfs_bookmark_exists(const char *path);
/*
* Mount support functions.
*/
_LIBZFS_H boolean_t is_mounted(libzfs_handle_t *, const char *special, char **);
_LIBZFS_H boolean_t zfs_is_mounted(zfs_handle_t *, char **);
_LIBZFS_H int zfs_mount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_mount_at(zfs_handle_t *, const char *, int, const char *);
_LIBZFS_H int zfs_unmount(zfs_handle_t *, const char *, int);
_LIBZFS_H int zfs_unmountall(zfs_handle_t *, int);
_LIBZFS_H int zfs_mount_delegation_check(void);
-#if defined(__linux__)
+#if defined(__linux__) || defined(__APPLE__)
_LIBZFS_H int zfs_parse_mount_options(char *mntopts, unsigned long *mntflags,
unsigned long *zfsflags, int sloppy, char *badopt, char *mtabopt);
_LIBZFS_H void zfs_adjust_mount_options(zfs_handle_t *zhp, const char *mntpoint,
char *mntopts, char *mtabopt);
#endif
/*
* Share support functions.
*/
_LIBZFS_H boolean_t zfs_is_shared(zfs_handle_t *);
_LIBZFS_H int zfs_share(zfs_handle_t *);
_LIBZFS_H int zfs_unshare(zfs_handle_t *);
/*
* Protocol-specific share support functions.
*/
_LIBZFS_H boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **);
_LIBZFS_H boolean_t zfs_is_shared_smb(zfs_handle_t *, char **);
_LIBZFS_H int zfs_share_nfs(zfs_handle_t *);
_LIBZFS_H int zfs_share_smb(zfs_handle_t *);
_LIBZFS_H int zfs_shareall(zfs_handle_t *);
_LIBZFS_H int zfs_unshare_nfs(zfs_handle_t *, const char *);
_LIBZFS_H int zfs_unshare_smb(zfs_handle_t *, const char *);
_LIBZFS_H int zfs_unshareall_nfs(zfs_handle_t *);
_LIBZFS_H int zfs_unshareall_smb(zfs_handle_t *);
_LIBZFS_H int zfs_unshareall_bypath(zfs_handle_t *, const char *);
_LIBZFS_H int zfs_unshareall_bytype(zfs_handle_t *, const char *, const char *);
_LIBZFS_H int zfs_unshareall(zfs_handle_t *);
_LIBZFS_H int zfs_deleg_share_nfs(libzfs_handle_t *, char *, char *, char *,
void *, void *, int, zfs_share_op_t);
_LIBZFS_H void zfs_commit_nfs_shares(void);
_LIBZFS_H void zfs_commit_smb_shares(void);
_LIBZFS_H void zfs_commit_all_shares(void);
_LIBZFS_H void zfs_commit_shares(const char *);
_LIBZFS_H int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
/*
* Utility functions to run an external process.
*/
#define STDOUT_VERBOSE 0x01
#define STDERR_VERBOSE 0x02
#define NO_DEFAULT_PATH 0x04 /* Don't use $PATH to lookup the command */
_LIBZFS_H int libzfs_run_process(const char *, char **, int);
_LIBZFS_H int libzfs_run_process_get_stdout(const char *, char *[], char *[],
char **[], int *);
_LIBZFS_H int libzfs_run_process_get_stdout_nopath(const char *, char *[],
char *[], char **[], int *);
_LIBZFS_H void libzfs_free_str_array(char **, int);
_LIBZFS_H int libzfs_envvar_is_set(char *);
/*
* Utility functions for zfs version
*/
_LIBZFS_H void zfs_version_userland(char *, int);
_LIBZFS_H int zfs_version_kernel(char *, int);
_LIBZFS_H int zfs_version_print(void);
/*
* Given a device or file, determine if it is part of a pool.
*/
_LIBZFS_H int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
boolean_t *);
/*
* Label manipulation.
*/
_LIBZFS_H int zpool_clear_label(int);
_LIBZFS_H int zpool_set_bootenv(zpool_handle_t *, const nvlist_t *);
_LIBZFS_H int zpool_get_bootenv(zpool_handle_t *, nvlist_t **);
/*
* Management interfaces for SMB ACL files
*/
_LIBZFS_H int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *);
_LIBZFS_H int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *);
_LIBZFS_H int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *,
char *);
/*
* Enable and disable datasets within a pool by mounting/unmounting and
* sharing/unsharing them.
*/
_LIBZFS_H int zpool_enable_datasets(zpool_handle_t *, const char *, int);
_LIBZFS_H int zpool_disable_datasets(zpool_handle_t *, boolean_t);
+_LIBZFS_H void zpool_disable_datasets_os(zpool_handle_t *, boolean_t);
+_LIBZFS_H void zpool_disable_volume_os(const char *);
/*
* Parse a features file for -o compatibility
*/
typedef enum {
ZPOOL_COMPATIBILITY_OK,
ZPOOL_COMPATIBILITY_WARNTOKEN,
ZPOOL_COMPATIBILITY_BADTOKEN,
ZPOOL_COMPATIBILITY_BADFILE,
ZPOOL_COMPATIBILITY_NOFILES
} zpool_compat_status_t;
_LIBZFS_H zpool_compat_status_t zpool_load_compat(const char *,
boolean_t *, char *, size_t);
#ifdef __FreeBSD__
/*
* Attach/detach the given filesystem to/from the given jail.
*/
_LIBZFS_H int zfs_jail(zfs_handle_t *zhp, int jailid, int attach);
/*
* Set loader options for next boot.
*/
_LIBZFS_H int zpool_nextboot(libzfs_handle_t *, uint64_t, uint64_t,
const char *);
#endif /* __FreeBSD__ */
#ifdef __cplusplus
}
#endif
#endif /* _LIBZFS_H */
diff --git a/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h b/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h
index 05e93efa64d7..20903717b58d 100644
--- a/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h
+++ b/sys/contrib/openzfs/include/os/freebsd/linux/compiler.h
@@ -1,101 +1,102 @@
/*
* Copyright (c) 2010 Isilon Systems, Inc.
* Copyright (c) 2010 iXsystems, Inc.
* Copyright (c) 2010 Panasas, Inc.
* Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
* Copyright (c) 2015 François Tigeot
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice unmodified, this list of conditions, and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $FreeBSD$
*/
#ifndef _LINUX_COMPILER_H_
#define _LINUX_COMPILER_H_
#include <sys/cdefs.h>
#define __user
#define __kernel
#define __safe
#define __force
#define __nocast
#define __iomem
#define __chk_user_ptr(x) ((void)0)
#define __chk_io_ptr(x) ((void)0)
#define __builtin_warning(x, y...) (1)
#define __acquires(x)
#define __releases(x)
#define __acquire(x) do { } while (0)
#define __release(x) do { } while (0)
#define __cond_lock(x, c) (c)
#define __bitwise
#define __devinitdata
#define __deprecated
#define __init
#define __initconst
#define __devinit
#define __devexit
#define __exit
#define __rcu
#define __percpu
#define __weak __weak_symbol
#define __malloc
#define ___stringify(...) #__VA_ARGS__
#define __stringify(...) ___stringify(__VA_ARGS__)
#define __attribute_const__ __attribute__((__const__))
#undef __always_inline
#define __always_inline inline
#define noinline __noinline
#define ____cacheline_aligned __aligned(CACHE_LINE_SIZE)
+#define fallthrough __attribute__((__fallthrough__))
#if !defined(_KERNEL) && !defined(_STANDALONE)
#define likely(x) __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)
#endif
#define typeof(x) __typeof(x)
#define uninitialized_var(x) x = x
#define __maybe_unused __unused
#define __always_unused __unused
#define __must_check __result_use_check
#define __printf(a, b) __printflike(a, b)
#define barrier() __asm__ __volatile__("": : :"memory")
#define smp_rmb() rmb()
#define ___PASTE(a, b) a##b
#define __PASTE(a, b) ___PASTE(a, b)
#define ACCESS_ONCE(x) (*(volatile __typeof(x) *)&(x))
#define WRITE_ONCE(x, v) do { \
barrier(); \
ACCESS_ONCE(x) = (v); \
barrier(); \
} while (0)
#define lockless_dereference(p) READ_ONCE(p)
#define _AT(T, X) ((T)(X))
#endif /* _LINUX_COMPILER_H_ */
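/*
 * Illustrative sketch (not part of the patch): the fallthrough macro defined
 * above marks intentional fall-through between switch cases so compilers that
 * warn on implicit fall-through (-Wimplicit-fallthrough) stay quiet. The
 * function and its cases are hypothetical.
 */
static inline int
example_fallthrough(int op)
{
	int ret = 0;

	switch (op) {
	case 0:
		ret += 1;	/* deliberately falls into the next case */
		fallthrough;
	case 1:
		ret += 1;
		break;
	default:
		break;
	}
	return (ret);
}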
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
index 87d541072015..019d5390adec 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/blkdev_compat.h
@@ -1,579 +1,582 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2011 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
*/
#ifndef _ZFS_BLKDEV_H
#define _ZFS_BLKDEV_H
#include <linux/blkdev.h>
#include <linux/elevator.h>
#include <linux/backing-dev.h>
#include <linux/hdreg.h>
#include <linux/msdos_fs.h> /* for SECTOR_* */
#ifndef HAVE_BLK_QUEUE_FLAG_SET
static inline void
blk_queue_flag_set(unsigned int flag, struct request_queue *q)
{
queue_flag_set(flag, q);
}
#endif
#ifndef HAVE_BLK_QUEUE_FLAG_CLEAR
static inline void
blk_queue_flag_clear(unsigned int flag, struct request_queue *q)
{
queue_flag_clear(flag, q);
}
#endif
/*
* 4.7 API,
 * The blk_queue_write_cache() interface has replaced the blk_queue_flush()
 * interface. However, the new interface is GPL-only, so we implement
 * our own trivial wrapper when the GPL-only version is detected.
 *
 * 2.6.36 - 4.6 API,
 * The blk_queue_flush() interface has replaced the blk_queue_ordered()
 * interface. However, while the old interface was available to all, the
 * new one is GPL-only. Thus, if the GPL-only version is detected, we
 * implement our own trivial helper.
*/
static inline void
blk_queue_set_write_cache(struct request_queue *q, bool wc, bool fua)
{
#if defined(HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY)
if (wc)
blk_queue_flag_set(QUEUE_FLAG_WC, q);
else
blk_queue_flag_clear(QUEUE_FLAG_WC, q);
if (fua)
blk_queue_flag_set(QUEUE_FLAG_FUA, q);
else
blk_queue_flag_clear(QUEUE_FLAG_FUA, q);
#elif defined(HAVE_BLK_QUEUE_WRITE_CACHE)
blk_queue_write_cache(q, wc, fua);
#elif defined(HAVE_BLK_QUEUE_FLUSH_GPL_ONLY)
if (wc)
q->flush_flags |= REQ_FLUSH;
if (fua)
q->flush_flags |= REQ_FUA;
#elif defined(HAVE_BLK_QUEUE_FLUSH)
blk_queue_flush(q, (wc ? REQ_FLUSH : 0) | (fua ? REQ_FUA : 0));
#else
#error "Unsupported kernel"
#endif
}
static inline void
blk_queue_set_read_ahead(struct request_queue *q, unsigned long ra_pages)
{
+#if !defined(HAVE_BLK_QUEUE_UPDATE_READAHEAD) && \
+ !defined(HAVE_DISK_UPDATE_READAHEAD)
#ifdef HAVE_BLK_QUEUE_BDI_DYNAMIC
q->backing_dev_info->ra_pages = ra_pages;
#else
q->backing_dev_info.ra_pages = ra_pages;
#endif
+#endif
}
#ifdef HAVE_BIO_BVEC_ITER
#define BIO_BI_SECTOR(bio) (bio)->bi_iter.bi_sector
#define BIO_BI_SIZE(bio) (bio)->bi_iter.bi_size
#define BIO_BI_IDX(bio) (bio)->bi_iter.bi_idx
#define BIO_BI_SKIP(bio) (bio)->bi_iter.bi_bvec_done
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bv), (b), (i))
typedef struct bvec_iter bvec_iterator_t;
#else
#define BIO_BI_SECTOR(bio) (bio)->bi_sector
#define BIO_BI_SIZE(bio) (bio)->bi_size
#define BIO_BI_IDX(bio) (bio)->bi_idx
#define BIO_BI_SKIP(bio) (0)
#define bio_for_each_segment4(bv, bvp, b, i) \
bio_for_each_segment((bvp), (b), (i))
typedef int bvec_iterator_t;
#endif
static inline void
bio_set_flags_failfast(struct block_device *bdev, int *flags)
{
#ifdef CONFIG_BUG
/*
* Disable FAILFAST for loopback devices because of the
* following incorrect BUG_ON() in loop_make_request().
* This support is also disabled for md devices because the
* test suite layers md devices on top of loopback devices.
* This may be removed when the loopback driver is fixed.
*
* BUG_ON(!lo || (rw != READ && rw != WRITE));
*/
if ((MAJOR(bdev->bd_dev) == LOOP_MAJOR) ||
(MAJOR(bdev->bd_dev) == MD_MAJOR))
return;
#ifdef BLOCK_EXT_MAJOR
if (MAJOR(bdev->bd_dev) == BLOCK_EXT_MAJOR)
return;
#endif /* BLOCK_EXT_MAJOR */
#endif /* CONFIG_BUG */
*flags |= REQ_FAILFAST_MASK;
}
/*
* Maximum disk label length, it may be undefined for some kernels.
*/
#if !defined(DISK_NAME_LEN)
#define DISK_NAME_LEN 32
#endif /* DISK_NAME_LEN */
#ifdef HAVE_BIO_BI_STATUS
static inline int
bi_status_to_errno(blk_status_t status)
{
switch (status) {
case BLK_STS_OK:
return (0);
case BLK_STS_NOTSUPP:
return (EOPNOTSUPP);
case BLK_STS_TIMEOUT:
return (ETIMEDOUT);
case BLK_STS_NOSPC:
return (ENOSPC);
case BLK_STS_TRANSPORT:
return (ENOLINK);
case BLK_STS_TARGET:
return (EREMOTEIO);
case BLK_STS_NEXUS:
return (EBADE);
case BLK_STS_MEDIUM:
return (ENODATA);
case BLK_STS_PROTECTION:
return (EILSEQ);
case BLK_STS_RESOURCE:
return (ENOMEM);
case BLK_STS_AGAIN:
return (EAGAIN);
case BLK_STS_IOERR:
return (EIO);
default:
return (EIO);
}
}
static inline blk_status_t
errno_to_bi_status(int error)
{
switch (error) {
case 0:
return (BLK_STS_OK);
case EOPNOTSUPP:
return (BLK_STS_NOTSUPP);
case ETIMEDOUT:
return (BLK_STS_TIMEOUT);
case ENOSPC:
return (BLK_STS_NOSPC);
case ENOLINK:
return (BLK_STS_TRANSPORT);
case EREMOTEIO:
return (BLK_STS_TARGET);
case EBADE:
return (BLK_STS_NEXUS);
case ENODATA:
return (BLK_STS_MEDIUM);
case EILSEQ:
return (BLK_STS_PROTECTION);
case ENOMEM:
return (BLK_STS_RESOURCE);
case EAGAIN:
return (BLK_STS_AGAIN);
case EIO:
return (BLK_STS_IOERR);
default:
return (BLK_STS_IOERR);
}
}
#endif /* HAVE_BIO_BI_STATUS */
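/*
 * Illustrative sketch (not part of the patch): the two translators above are
 * inverses for the values they recognize and collapse anything else to the
 * generic I/O error (EIO / BLK_STS_IOERR).
 */
#ifdef HAVE_BIO_BI_STATUS
static inline int
example_bi_status_roundtrip(void)
{
	/* ENOSPC survives a round trip through blk_status_t. */
	return (bi_status_to_errno(errno_to_bi_status(ENOSPC)) == ENOSPC);
}
#endif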
/*
* 4.3 API change
* The bio_endio() prototype changed slightly. These are helper
 * macros to ensure the prototype and invocation are handled.
*/
#ifdef HAVE_1ARG_BIO_END_IO_T
#ifdef HAVE_BIO_BI_STATUS
#define BIO_END_IO_ERROR(bio) bi_status_to_errno(bio->bi_status)
#define BIO_END_IO_PROTO(fn, x, z) static void fn(struct bio *x)
#define BIO_END_IO(bio, error) bio_set_bi_status(bio, error)
static inline void
bio_set_bi_status(struct bio *bio, int error)
{
ASSERT3S(error, <=, 0);
bio->bi_status = errno_to_bi_status(-error);
bio_endio(bio);
}
#else
#define BIO_END_IO_ERROR(bio) (-(bio->bi_error))
#define BIO_END_IO_PROTO(fn, x, z) static void fn(struct bio *x)
#define BIO_END_IO(bio, error) bio_set_bi_error(bio, error)
static inline void
bio_set_bi_error(struct bio *bio, int error)
{
ASSERT3S(error, <=, 0);
bio->bi_error = error;
bio_endio(bio);
}
#endif /* HAVE_BIO_BI_STATUS */
#else
#define BIO_END_IO_PROTO(fn, x, z) static void fn(struct bio *x, int z)
#define BIO_END_IO(bio, error) bio_endio(bio, error);
#endif /* HAVE_1ARG_BIO_END_IO_T */
/*
* 4.1 API,
* 3.10.0 CentOS 7.x API,
* blkdev_reread_part()
*
 * For older kernels, trigger a re-read of the partition table by calling
 * check_disk_change(), which calls flush_disk() to invalidate the device.
 *
 * For newer kernels (as of 5.10), bdev_check_media_change() is used in place
 * of check_disk_change(), with the modification that invalidation is no
 * longer forced.
*/
#ifdef HAVE_CHECK_DISK_CHANGE
#define zfs_check_media_change(bdev) check_disk_change(bdev)
#ifdef HAVE_BLKDEV_REREAD_PART
#define vdev_bdev_reread_part(bdev) blkdev_reread_part(bdev)
#else
#define vdev_bdev_reread_part(bdev) check_disk_change(bdev)
#endif /* HAVE_BLKDEV_REREAD_PART */
#else
#ifdef HAVE_BDEV_CHECK_MEDIA_CHANGE
static inline int
zfs_check_media_change(struct block_device *bdev)
{
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
struct gendisk *gd = bdev->bd_disk;
const struct block_device_operations *bdo = gd->fops;
#endif
if (!bdev_check_media_change(bdev))
return (0);
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK
/*
* Force revalidation, to mimic the old behavior of
* check_disk_change()
*/
if (bdo->revalidate_disk)
bdo->revalidate_disk(gd);
#endif
return (0);
}
#define vdev_bdev_reread_part(bdev) zfs_check_media_change(bdev)
#else
/*
* This is encountered if check_disk_change() and bdev_check_media_change()
* are not available in the kernel - likely due to an API change that needs
* to be chased down.
*/
#error "Unsupported kernel: no usable disk change check"
#endif /* HAVE_BDEV_CHECK_MEDIA_CHANGE */
#endif /* HAVE_CHECK_DISK_CHANGE */
/*
* 2.6.27 API change
 * The function was exported for use; prior to this it existed, but the
 * symbol was not exported.
*
* 4.4.0-6.21 API change for Ubuntu
* lookup_bdev() gained a second argument, FMODE_*, to check inode permissions.
*
* 5.11 API change
* Changed to take a dev_t argument which is set on success and return a
* non-zero error code on failure.
*/
static inline int
vdev_lookup_bdev(const char *path, dev_t *dev)
{
#if defined(HAVE_DEVT_LOOKUP_BDEV)
return (lookup_bdev(path, dev));
#elif defined(HAVE_1ARG_LOOKUP_BDEV)
struct block_device *bdev = lookup_bdev(path);
if (IS_ERR(bdev))
return (PTR_ERR(bdev));
*dev = bdev->bd_dev;
bdput(bdev);
return (0);
#elif defined(HAVE_MODE_LOOKUP_BDEV)
struct block_device *bdev = lookup_bdev(path, FMODE_READ);
if (IS_ERR(bdev))
return (PTR_ERR(bdev));
*dev = bdev->bd_dev;
bdput(bdev);
return (0);
#else
#error "Unsupported kernel"
#endif
}
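/*
 * Illustrative sketch (not part of the patch): regardless of which
 * lookup_bdev() variant the running kernel provides, vdev_lookup_bdev()
 * behaves like the 5.11 form, filling in a dev_t and returning 0 or a
 * negative error code. The device path below is hypothetical.
 */
static inline int
example_lookup(dev_t *devp)
{
	int error = vdev_lookup_bdev("/dev/sda", devp);

	if (error != 0)
		return (error);	/* e.g. -ENOENT if the node does not exist */
	return (0);
}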
/*
* Kernels without bio_set_op_attrs use bi_rw for the bio flags.
*/
#if !defined(HAVE_BIO_SET_OP_ATTRS)
static inline void
bio_set_op_attrs(struct bio *bio, unsigned rw, unsigned flags)
{
bio->bi_rw |= rw | flags;
}
#endif
/*
* bio_set_flush - Set the appropriate flags in a bio to guarantee
* data are on non-volatile media on completion.
*
* 2.6.37 - 4.8 API,
* Introduce WRITE_FLUSH, WRITE_FUA, and WRITE_FLUSH_FUA flags as a
* replacement for WRITE_BARRIER to allow expressing richer semantics
* to the block layer. It's up to the block layer to implement the
* semantics correctly. Use the WRITE_FLUSH_FUA flag combination.
*
* 4.8 - 4.9 API,
* REQ_FLUSH was renamed to REQ_PREFLUSH. For consistency with previous
* OpenZFS releases, prefer the WRITE_FLUSH_FUA flag set if it's available.
*
* 4.10 API,
* The read/write flags and their modifiers, including WRITE_FLUSH,
* WRITE_FUA and WRITE_FLUSH_FUA were removed from fs.h in
* torvalds/linux@70fd7614 and replaced by direct flag modification
* of the REQ_ flags in bio->bi_opf. Use REQ_PREFLUSH.
*/
static inline void
bio_set_flush(struct bio *bio)
{
#if defined(HAVE_REQ_PREFLUSH) /* >= 4.10 */
bio_set_op_attrs(bio, 0, REQ_PREFLUSH);
#elif defined(WRITE_FLUSH_FUA) /* >= 2.6.37 and <= 4.9 */
bio_set_op_attrs(bio, 0, WRITE_FLUSH_FUA);
#else
#error "Allowing the build will cause bio_set_flush requests to be ignored."
#endif
}
/*
* 4.8 API,
* REQ_OP_FLUSH
*
* 4.8-rc0 - 4.8-rc1,
* REQ_PREFLUSH
*
* 2.6.36 - 4.7 API,
* REQ_FLUSH
*
 * Checking for whichever of these flags is available works in all cases but
 * may have a performance impact for some kernels.  It has the advantage of
 * minimizing kernel specific changes in the zvol code.
*
*/
static inline boolean_t
bio_is_flush(struct bio *bio)
{
#if defined(HAVE_REQ_OP_FLUSH) && defined(HAVE_BIO_BI_OPF)
return ((bio_op(bio) == REQ_OP_FLUSH) || (bio->bi_opf & REQ_PREFLUSH));
#elif defined(HAVE_REQ_PREFLUSH) && defined(HAVE_BIO_BI_OPF)
return (bio->bi_opf & REQ_PREFLUSH);
#elif defined(HAVE_REQ_PREFLUSH) && !defined(HAVE_BIO_BI_OPF)
return (bio->bi_rw & REQ_PREFLUSH);
#elif defined(HAVE_REQ_FLUSH)
return (bio->bi_rw & REQ_FLUSH);
#else
#error "Unsupported kernel"
#endif
}
/*
* 4.8 API,
* REQ_FUA flag moved to bio->bi_opf
*
* 2.6.x - 4.7 API,
* REQ_FUA
*/
static inline boolean_t
bio_is_fua(struct bio *bio)
{
#if defined(HAVE_BIO_BI_OPF)
return (bio->bi_opf & REQ_FUA);
#elif defined(REQ_FUA)
return (bio->bi_rw & REQ_FUA);
#else
#error "Allowing the build will cause fua requests to be ignored."
#endif
}
/*
* 4.8 API,
* REQ_OP_DISCARD
*
* 2.6.36 - 4.7 API,
* REQ_DISCARD
*
* In all cases the normal I/O path is used for discards. The only
* difference is how the kernel tags individual I/Os as discards.
*/
static inline boolean_t
bio_is_discard(struct bio *bio)
{
#if defined(HAVE_REQ_OP_DISCARD)
return (bio_op(bio) == REQ_OP_DISCARD);
#elif defined(HAVE_REQ_DISCARD)
return (bio->bi_rw & REQ_DISCARD);
#else
#error "Unsupported kernel"
#endif
}
/*
* 4.8 API,
* REQ_OP_SECURE_ERASE
*
* 2.6.36 - 4.7 API,
* REQ_SECURE
*/
static inline boolean_t
bio_is_secure_erase(struct bio *bio)
{
#if defined(HAVE_REQ_OP_SECURE_ERASE)
return (bio_op(bio) == REQ_OP_SECURE_ERASE);
#elif defined(REQ_SECURE)
return (bio->bi_rw & REQ_SECURE);
#else
return (0);
#endif
}
/*
* 2.6.33 API change
* Discard granularity and alignment restrictions may now be set. For
* older kernels which do not support this it is safe to skip it.
*/
static inline void
blk_queue_discard_granularity(struct request_queue *q, unsigned int dg)
{
q->limits.discard_granularity = dg;
}
/*
* 4.8 API,
* blk_queue_secure_erase()
*
* 2.6.36 - 4.7 API,
* blk_queue_secdiscard()
*/
static inline int
blk_queue_discard_secure(struct request_queue *q)
{
#if defined(HAVE_BLK_QUEUE_SECURE_ERASE)
return (blk_queue_secure_erase(q));
#elif defined(HAVE_BLK_QUEUE_SECDISCARD)
return (blk_queue_secdiscard(q));
#else
return (0);
#endif
}
/*
* A common holder for vdev_bdev_open() is used to relax the exclusive open
* semantics slightly. Internal vdev disk callers may pass VDEV_HOLDER to
* allow them to open the device multiple times. Other kernel callers and
* user space processes which don't pass this value will get EBUSY. This is
* currently required for the correct operation of hot spares.
*/
#define VDEV_HOLDER ((void *)0x2401de7)
static inline unsigned long
blk_generic_start_io_acct(struct request_queue *q __attribute__((unused)),
struct gendisk *disk __attribute__((unused)),
int rw __attribute__((unused)), struct bio *bio)
{
#if defined(HAVE_DISK_IO_ACCT)
return (disk_start_io_acct(disk, bio_sectors(bio), bio_op(bio)));
#elif defined(HAVE_BIO_IO_ACCT)
return (bio_start_io_acct(bio));
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
unsigned long start_time = jiffies;
generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
return (start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
unsigned long start_time = jiffies;
generic_start_io_acct(q, rw, bio_sectors(bio), &disk->part0);
return (start_time);
#else
/* Unsupported */
return (0);
#endif
}
static inline void
blk_generic_end_io_acct(struct request_queue *q __attribute__((unused)),
struct gendisk *disk __attribute__((unused)),
int rw __attribute__((unused)), struct bio *bio, unsigned long start_time)
{
#if defined(HAVE_DISK_IO_ACCT)
disk_end_io_acct(disk, bio_op(bio), start_time);
#elif defined(HAVE_BIO_IO_ACCT)
bio_end_io_acct(bio, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_3ARG)
generic_end_io_acct(rw, &disk->part0, start_time);
#elif defined(HAVE_GENERIC_IO_ACCT_4ARG)
generic_end_io_acct(q, rw, &disk->part0, start_time);
#endif
}
#ifndef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS
static inline struct request_queue *
blk_generic_alloc_queue(make_request_fn make_request, int node_id)
{
#if defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN)
return (blk_alloc_queue(make_request, node_id));
#elif defined(HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH)
return (blk_alloc_queue_rh(make_request, node_id));
#else
struct request_queue *q = blk_alloc_queue(GFP_KERNEL);
if (q != NULL)
blk_queue_make_request(q, make_request);
return (q);
#endif
}
#endif /* !HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
#endif /* _ZFS_BLKDEV_H */
diff --git a/sys/contrib/openzfs/include/os/linux/kernel/linux/compiler_compat.h b/sys/contrib/openzfs/include/os/linux/kernel/linux/compiler_compat.h
index 921d32f246c5..2c0704da2e51 100644
--- a/sys/contrib/openzfs/include/os/linux/kernel/linux/compiler_compat.h
+++ b/sys/contrib/openzfs/include/os/linux/kernel/linux/compiler_compat.h
@@ -1,35 +1,43 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2018 Lawrence Livermore National Security, LLC.
*/
#ifndef _ZFS_COMPILER_COMPAT_H
#define _ZFS_COMPILER_COMPAT_H
#include <linux/compiler.h>
+#if !defined(fallthrough)
+#if defined(HAVE_IMPLICIT_FALLTHROUGH)
+#define fallthrough __attribute__((__fallthrough__))
+#else
+#define fallthrough ((void)0)
+#endif
+#endif
+
#if !defined(READ_ONCE)
#define READ_ONCE(x) ACCESS_ONCE(x)
#endif
#endif /* _ZFS_COMPILER_COMPAT_H */
diff --git a/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h b/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h
index 79297067c17d..d2088371c6bc 100644
--- a/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h
+++ b/sys/contrib/openzfs/include/os/linux/spl/sys/cmn_err.h
@@ -1,44 +1,48 @@
/*
* Copyright (C) 2007-2010 Lawrence Livermore National Security, LLC.
* Copyright (C) 2007 The Regents of the University of California.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Written by Brian Behlendorf <behlendorf1@llnl.gov>.
* UCRL-CODE-235197
*
* This file is part of the SPL, Solaris Porting Layer.
*
* The SPL is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* The SPL is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License along
* with the SPL. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef _SPL_CMN_ERR_H
#define _SPL_CMN_ERR_H
+#if defined(_KERNEL) && defined(HAVE_STANDALONE_LINUX_STDARG)
+#include <linux/stdarg.h>
+#else
#include <stdarg.h>
+#endif
#define CE_CONT 0 /* continuation */
#define CE_NOTE 1 /* notice */
#define CE_WARN 2 /* warning */
#define CE_PANIC 3 /* panic */
#define CE_IGNORE 4 /* print nothing */
extern void cmn_err(int, const char *, ...)
__attribute__((format(printf, 2, 3)));
extern void vcmn_err(int, const char *, va_list)
__attribute__((format(printf, 2, 0)));
extern void vpanic(const char *, va_list)
__attribute__((format(printf, 1, 0)));
#define fm_panic panic
#endif /* _SPL_CMN_ERR_H */
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_context_os.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_context_os.h
index 981a6b8a63e5..9e4260558285 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_context_os.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zfs_context_os.h
@@ -1,34 +1,35 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
#ifndef ZFS_CONTEXT_OS_H
#define ZFS_CONTEXT_OS_H
#include <linux/dcache_compat.h>
#include <linux/utsname_compat.h>
+#include <linux/compiler_compat.h>
#include <linux/module.h>
#if THREAD_SIZE >= 16384
#define HAVE_LARGE_STACKS 1
#endif
#endif
diff --git a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
index 54f3fa0fdb0f..ff86e027bbe2 100644
--- a/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
+++ b/sys/contrib/openzfs/include/os/linux/zfs/sys/zpl.h
@@ -1,197 +1,201 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
*/
#ifndef _SYS_ZPL_H
#define _SYS_ZPL_H
#include <sys/mntent.h>
#include <sys/vfs.h>
#include <linux/aio.h>
#include <linux/dcache_compat.h>
#include <linux/exportfs.h>
#include <linux/falloc.h>
#include <linux/parser.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/vfs_compat.h>
#include <linux/writeback.h>
#include <linux/xattr_compat.h>
/* zpl_inode.c */
extern void zpl_vap_init(vattr_t *vap, struct inode *dir,
umode_t mode, cred_t *cr);
extern const struct inode_operations zpl_inode_operations;
extern const struct inode_operations zpl_dir_inode_operations;
extern const struct inode_operations zpl_symlink_inode_operations;
extern const struct inode_operations zpl_special_inode_operations;
extern dentry_operations_t zpl_dentry_operations;
extern const struct address_space_operations zpl_address_space_operations;
extern const struct file_operations zpl_file_operations;
extern const struct file_operations zpl_dir_file_operations;
/* zpl_super.c */
extern void zpl_prune_sb(int64_t nr_to_scan, void *arg);
extern const struct super_operations zpl_super_operations;
extern const struct export_operations zpl_export_operations;
extern struct file_system_type zpl_fs_type;
/* zpl_xattr.c */
extern ssize_t zpl_xattr_list(struct dentry *dentry, char *buf, size_t size);
extern int zpl_xattr_security_init(struct inode *ip, struct inode *dip,
const struct qstr *qstr);
#if defined(CONFIG_FS_POSIX_ACL)
#if defined(HAVE_SET_ACL)
#if defined(HAVE_SET_ACL_USERNS)
extern int zpl_set_acl(struct user_namespace *userns, struct inode *ip,
struct posix_acl *acl, int type);
#else
extern int zpl_set_acl(struct inode *ip, struct posix_acl *acl, int type);
#endif /* HAVE_SET_ACL_USERNS */
#endif /* HAVE_SET_ACL */
+#if defined(HAVE_GET_ACL_RCU)
+extern struct posix_acl *zpl_get_acl(struct inode *ip, int type, bool rcu);
+#elif defined(HAVE_GET_ACL)
extern struct posix_acl *zpl_get_acl(struct inode *ip, int type);
+#endif
extern int zpl_init_acl(struct inode *ip, struct inode *dir);
extern int zpl_chmod_acl(struct inode *ip);
#else
static inline int
zpl_init_acl(struct inode *ip, struct inode *dir)
{
return (0);
}
static inline int
zpl_chmod_acl(struct inode *ip)
{
return (0);
}
#endif /* CONFIG_FS_POSIX_ACL */
extern xattr_handler_t *zpl_xattr_handlers[];
/* zpl_ctldir.c */
extern const struct file_operations zpl_fops_root;
extern const struct inode_operations zpl_ops_root;
extern const struct file_operations zpl_fops_snapdir;
extern const struct inode_operations zpl_ops_snapdir;
extern const struct dentry_operations zpl_dops_snapdirs;
extern const struct file_operations zpl_fops_shares;
extern const struct inode_operations zpl_ops_shares;
#if defined(HAVE_VFS_ITERATE) || defined(HAVE_VFS_ITERATE_SHARED)
#define ZPL_DIR_CONTEXT_INIT(_dirent, _actor, _pos) { \
.actor = _actor, \
.pos = _pos, \
}
typedef struct dir_context zpl_dir_context_t;
#define zpl_dir_emit dir_emit
#define zpl_dir_emit_dot dir_emit_dot
#define zpl_dir_emit_dotdot dir_emit_dotdot
#define zpl_dir_emit_dots dir_emit_dots
#else
typedef struct zpl_dir_context {
void *dirent;
const filldir_t actor;
loff_t pos;
} zpl_dir_context_t;
#define ZPL_DIR_CONTEXT_INIT(_dirent, _actor, _pos) { \
.dirent = _dirent, \
.actor = _actor, \
.pos = _pos, \
}
static inline bool
zpl_dir_emit(zpl_dir_context_t *ctx, const char *name, int namelen,
uint64_t ino, unsigned type)
{
return (!ctx->actor(ctx->dirent, name, namelen, ctx->pos, ino, type));
}
static inline bool
zpl_dir_emit_dot(struct file *file, zpl_dir_context_t *ctx)
{
return (ctx->actor(ctx->dirent, ".", 1, ctx->pos,
file_inode(file)->i_ino, DT_DIR) == 0);
}
static inline bool
zpl_dir_emit_dotdot(struct file *file, zpl_dir_context_t *ctx)
{
return (ctx->actor(ctx->dirent, "..", 2, ctx->pos,
parent_ino(file_dentry(file)), DT_DIR) == 0);
}
static inline bool
zpl_dir_emit_dots(struct file *file, zpl_dir_context_t *ctx)
{
if (ctx->pos == 0) {
if (!zpl_dir_emit_dot(file, ctx))
return (false);
ctx->pos = 1;
}
if (ctx->pos == 1) {
if (!zpl_dir_emit_dotdot(file, ctx))
return (false);
ctx->pos = 2;
}
return (true);
}
#endif /* HAVE_VFS_ITERATE */
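/*
 * Illustrative sketch (not part of the patch): a directory iterator built on
 * the wrappers above first emits "." and "..", then emits regular entries
 * until the callback stops accepting them. The entry name, inode number, and
 * position handling here are hypothetical.
 */
static inline int
example_iterate(struct file *filp, zpl_dir_context_t *ctx)
{
	if (!zpl_dir_emit_dots(filp, ctx))
		return (0);

	/* Positions 0 and 1 were consumed by "." and ".." above. */
	if (ctx->pos == 2) {
		if (!zpl_dir_emit(ctx, "example", 7, 12345, DT_REG))
			return (0);
		ctx->pos++;
	}
	return (0);
}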
#if defined(HAVE_INODE_TIMESTAMP_TRUNCATE)
#define zpl_inode_timestamp_truncate(ts, ip) timestamp_truncate(ts, ip)
#elif defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zpl_inode_timestamp_truncate(ts, ip) \
timespec64_trunc(ts, (ip)->i_sb->s_time_gran)
#else
#define zpl_inode_timestamp_truncate(ts, ip) \
timespec_trunc(ts, (ip)->i_sb->s_time_gran)
#endif
#if defined(HAVE_INODE_OWNER_OR_CAPABLE)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ip)
#elif defined(HAVE_INODE_OWNER_OR_CAPABLE_IDMAPPED)
#define zpl_inode_owner_or_capable(ns, ip) inode_owner_or_capable(ns, ip)
#else
#error "Unsupported kernel"
#endif
#ifdef HAVE_SETATTR_PREPARE_USERNS
#define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(ns, dentry, ia)
#else
/*
* Use kernel-provided version, or our own from
* linux/vfs_compat.h
*/
#define zpl_setattr_prepare(ns, dentry, ia) setattr_prepare(dentry, ia)
#endif
#endif /* _SYS_ZPL_H */
diff --git a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
index 6491606d328b..cd080c8ee667 100644
--- a/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
+++ b/sys/contrib/openzfs/include/sys/fm/fs/zfs.h
@@ -1,126 +1,135 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2020 by Delphix. All rights reserved.
*/
#ifndef _SYS_FM_FS_ZFS_H
#define _SYS_FM_FS_ZFS_H
#ifdef __cplusplus
extern "C" {
#endif
#define ZFS_ERROR_CLASS "fs.zfs"
#define FM_EREPORT_ZFS_CHECKSUM "checksum"
#define FM_EREPORT_ZFS_AUTHENTICATION "authentication"
#define FM_EREPORT_ZFS_IO "io"
#define FM_EREPORT_ZFS_DATA "data"
#define FM_EREPORT_ZFS_DELAY "delay"
#define FM_EREPORT_ZFS_DEADMAN "deadman"
#define FM_EREPORT_ZFS_POOL "zpool"
#define FM_EREPORT_ZFS_DEVICE_UNKNOWN "vdev.unknown"
#define FM_EREPORT_ZFS_DEVICE_OPEN_FAILED "vdev.open_failed"
#define FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA "vdev.corrupt_data"
#define FM_EREPORT_ZFS_DEVICE_NO_REPLICAS "vdev.no_replicas"
#define FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM "vdev.bad_guid_sum"
#define FM_EREPORT_ZFS_DEVICE_TOO_SMALL "vdev.too_small"
#define FM_EREPORT_ZFS_DEVICE_BAD_LABEL "vdev.bad_label"
#define FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT "vdev.bad_ashift"
#define FM_EREPORT_ZFS_IO_FAILURE "io_failure"
#define FM_EREPORT_ZFS_PROBE_FAILURE "probe_failure"
#define FM_EREPORT_ZFS_LOG_REPLAY "log_replay"
#define FM_EREPORT_ZFS_CONFIG_CACHE_WRITE "config_cache_write"
#define FM_EREPORT_PAYLOAD_ZFS_POOL "pool"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE "pool_failmode"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_GUID "pool_guid"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT "pool_context"
#define FM_EREPORT_PAYLOAD_ZFS_POOL_STATE "pool_state"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID "vdev_guid"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE "vdev_type"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH "vdev_path"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH "vdev_physpath"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH "vdev_enc_sysfs_path"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID "vdev_devid"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU "vdev_fru"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE "vdev_state"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE "vdev_laststate"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT "vdev_ashift"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS "vdev_complete_ts"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS "vdev_delta_ts"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS "vdev_spare_paths"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS "vdev_spare_guids"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS "vdev_read_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS "vdev_write_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS "vdev_cksum_errors"
#define FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS "vdev_delays"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID "parent_guid"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE "parent_type"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH "parent_path"
#define FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID "parent_devid"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET "zio_objset"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT "zio_object"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL "zio_level"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID "zio_blkid"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR "zio_err"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET "zio_offset"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE "zio_size"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS "zio_flags"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE "zio_stage"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY "zio_priority"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE "zio_pipeline"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY "zio_delay"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP "zio_timestamp"
#define FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA "zio_delta"
#define FM_EREPORT_PAYLOAD_ZFS_PREV_STATE "prev_state"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED "cksum_expected"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL "cksum_actual"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO "cksum_algorithm"
#define FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP "cksum_byteswap"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES "bad_ranges"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP "bad_ranges_min_gap"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS "bad_range_sets"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS "bad_range_clears"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS "bad_set_bits"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS "bad_cleared_bits"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM "bad_set_histogram"
#define FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM "bad_cleared_histogram"
+#define FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME "snapshot_name"
+#define FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME "device_name"
+#define FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME "raw_name"
+#define FM_EREPORT_PAYLOAD_ZFS_VOLUME "volume"
#define FM_EREPORT_FAILMODE_WAIT "wait"
#define FM_EREPORT_FAILMODE_CONTINUE "continue"
#define FM_EREPORT_FAILMODE_PANIC "panic"
#define FM_RESOURCE_REMOVED "removed"
#define FM_RESOURCE_AUTOREPLACE "autoreplace"
#define FM_RESOURCE_STATECHANGE "statechange"
+#define FM_RESOURCE_ZFS_SNAPSHOT_MOUNT "snapshot_mount"
+#define FM_RESOURCE_ZFS_SNAPSHOT_UNMOUNT "snapshot_unmount"
+#define FM_RESOURCE_ZVOL_CREATE_SYMLINK "zvol_create"
+#define FM_RESOURCE_ZVOL_REMOVE_SYMLINK "zvol_remove"
+
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FM_FS_ZFS_H */
diff --git a/sys/contrib/openzfs/include/sys/spa.h b/sys/contrib/openzfs/include/sys/spa.h
index f811d6f5a743..2ae467877ddf 100644
--- a/sys/contrib/openzfs/include/sys/spa.h
+++ b/sys/contrib/openzfs/include/sys/spa.h
@@ -1,1211 +1,1213 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2021 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
*/
#ifndef _SYS_SPA_H
#define _SYS_SPA_H
#include <sys/avl.h>
#include <sys/zfs_context.h>
#include <sys/kstat.h>
#include <sys/nvpair.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/bitops.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Forward references that lots of things need.
*/
typedef struct spa spa_t;
typedef struct vdev vdev_t;
typedef struct metaslab metaslab_t;
typedef struct metaslab_group metaslab_group_t;
typedef struct metaslab_class metaslab_class_t;
typedef struct zio zio_t;
typedef struct zilog zilog_t;
typedef struct spa_aux_vdev spa_aux_vdev_t;
typedef struct ddt ddt_t;
typedef struct ddt_entry ddt_entry_t;
typedef struct zbookmark_phys zbookmark_phys_t;
struct bpobj;
struct bplist;
struct dsl_pool;
struct dsl_dataset;
struct dsl_crypto_params;
/*
* Alignment Shift (ashift) is an immutable, internal top-level vdev property
* which can only be set at vdev creation time. Physical writes are always done
* according to it, which makes 2^ashift the smallest possible IO on a vdev.
*
* We currently allow values ranging from 512 bytes (2^9 = 512) to 64 KiB
* (2^16 = 65,536).
*/
#define ASHIFT_MIN 9
#define ASHIFT_MAX 16
/*
* Size of block to hold the configuration data (a packed nvlist)
*/
#define SPA_CONFIG_BLOCKSIZE (1ULL << 14)
/*
* The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
* The ASIZE encoding should be at least 64 times larger (6 more bits)
* to support up to 4-way RAID-Z mirror mode with worst-case gang block
* overhead, three DVAs per bp, plus one more bit in case we do anything
* else that expands the ASIZE.
*/
#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */
#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */
#define SPA_COMPRESSBITS 7
#define SPA_VDEVBITS 24
#define SPA_COMPRESSMASK ((1U << SPA_COMPRESSBITS) - 1)
/*
* All SPA data is represented by 128-bit data virtual addresses (DVAs).
* The members of the dva_t should be considered opaque outside the SPA.
*/
typedef struct dva {
uint64_t dva_word[2];
} dva_t;
/*
* Some checksums/hashes need a 256-bit initialization salt. This salt is kept
* secret and is suitable for use in MAC algorithms as the key.
*/
typedef struct zio_cksum_salt {
uint8_t zcs_bytes[32];
} zio_cksum_salt_t;
/*
* Each block is described by its DVAs, time of birth, checksum, etc.
* The word-by-word, bit-by-bit layout of the blkptr is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | pad | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | pad | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | pad | vdev3 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 |G| offset3 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | checksum[2] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | checksum[3] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* vdev virtual device ID
* offset offset into virtual device
* LSIZE logical size
* PSIZE physical size (after compression)
* ASIZE allocated size (including RAID-Z parity and gang block headers)
* GRID RAID-Z layout information (reserved for future use)
* cksum checksum function
* comp compression function
* G gang block indicator
* B byteorder (endianness)
* D dedup
* X encryption
* E blkptr_t contains embedded data (see below)
* lvl level of indirection
* type DMU object type
* phys birth txg when dva[0] was written; zero if same as logical birth txg
* note that typically all the dva's would be written in this
* txg, but they could be different if they were moved by
* device removal.
* log. birth transaction group in which the block was logically born
* fill count number of non-zero blocks under this bp
* checksum[4] 256-bit checksum of the data this bp describes
*/
/*
* The blkptr_t's of encrypted blocks also need to store the encryption
* parameters so that the block can be decrypted. This layout is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | salt |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 | IV1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | IV2 | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | MAC[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | MAC[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* salt Salt for generating encryption keys
* IV1 First 64 bits of encryption IV
* X Block requires encryption handling (set to 1)
* E blkptr_t contains embedded data (set to 0, see below)
* fill count number of non-zero blocks under this bp (truncated to 32 bits)
* IV2 Last 32 bits of encryption IV
* checksum[2] 128-bit checksum of the data this bp describes
* MAC[2] 128-bit message authentication code for this data
*
* The X bit being set indicates that this block is one of 3 types. If this is
* a level 0 block with an encrypted object type, the block is encrypted
* (see BP_IS_ENCRYPTED()). If this is a level 0 block with an unencrypted
* object type, this block is authenticated with an HMAC (see
* BP_IS_AUTHENTICATED()). Otherwise (if level > 0), this bp will use the MAC
* words to store a checksum-of-MACs from the level below (see
* BP_HAS_INDIRECT_MAC_CKSUM()). For convenience in the code, BP_IS_PROTECTED()
* refers to both encrypted and authenticated blocks and BP_USES_CRYPT()
* refers to any of these 3 kinds of blocks.
*
* The additional encryption parameters are the salt, IV, and MAC which are
* explained in greater detail in the block comment at the top of zio_crypt.c.
* The MAC occupies half of the checksum space since it serves a very similar
* purpose: to prevent data corruption on disk. The only functional difference
 * is that the checksum is used to detect on-disk corruption whether or not the
 * encryption key is loaded, while the MAC provides additional protection against
* malicious disk tampering. We use the 3rd DVA to store the salt and first
* 64 bits of the IV. As a result encrypted blocks can only have 2 copies
* maximum instead of the normal 3. The last 32 bits of the IV are stored in
* the upper bits of what is usually the fill count. Note that only blocks at
* level 0 or -2 are ever encrypted, which allows us to guarantee that these
* 32 bits are not trampled over by other code (see zio_crypt.c for details).
* The salt and IV are not used for authenticated bps or bps with an indirect
* MAC checksum, so these blocks can utilize all 3 DVAs and the full 64 bits
* for the fill count.
*/
/*
* "Embedded" blkptr_t's don't actually point to a block, instead they
* have a data payload embedded in the blkptr_t itself. See the comment
* in blkptr.c for more details.
*
* The blkptr_t is laid out as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | payload |
* 1 | payload |
* 2 | payload |
* 3 | payload |
* 4 | payload |
* 5 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | etype |E| comp| PSIZE| LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | payload |
* 8 | payload |
* 9 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | payload |
* c | payload |
* d | payload |
* e | payload |
* f | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* payload contains the embedded data
* B (byteorder) byteorder (endianness)
* D (dedup) padding (set to zero)
* X encryption (set to zero)
* E (embedded) set to one
* lvl indirection level
* type DMU object type
* etype how to interpret embedded data (BP_EMBEDDED_TYPE_*)
* comp compression function of payload
* PSIZE size of payload after compression, in bytes
* LSIZE logical size of payload, in bytes
* note that 25 bits is enough to store the largest
* "normal" BP's LSIZE (2^16 * 2^9) in bytes
* log. birth transaction group in which the block was logically born
*
* Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
* bp's they are stored in units of SPA_MINBLOCKSHIFT.
* Generally, the generic BP_GET_*() macros can be used on embedded BP's.
* The B, D, X, lvl, type, and comp fields are stored the same as with normal
* BP's so the BP_SET_* macros can be used with them. etype, PSIZE, LSIZE must
* be set with the BPE_SET_* macros. BP_SET_EMBEDDED() should be called before
* other macros, as they assert that they are only used on BP's of the correct
* "embedded-ness". Encrypted blkptr_t's cannot be embedded because they use
* the payload space for encryption parameters (see the comment above on
* how encryption parameters are stored).
*/
#define BPE_GET_ETYPE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET((bp)->blk_prop, 40, 8))
#define BPE_SET_ETYPE(bp, t) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, t); \
} while (0)
#define BPE_GET_LSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
#define BPE_SET_LSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
} while (0)
#define BPE_GET_PSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
#define BPE_SET_PSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
} while (0)
typedef enum bp_embedded_type {
BP_EMBEDDED_TYPE_DATA,
BP_EMBEDDED_TYPE_RESERVED, /* Reserved for Delphix byteswap feature. */
BP_EMBEDDED_TYPE_REDACTED,
NUM_BP_EMBEDDED_TYPES
} bp_embedded_type_t;
#define BPE_NUM_WORDS 14
#define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
#define BPE_IS_PAYLOADWORD(bp, wp) \
((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */
#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */
#define SPA_SYNC_MIN_VDEVS 3 /* min vdevs to update during sync */
/*
* A block is a hole when it has either 1) never been written to, or
* 2) is zero-filled. In both cases, ZFS can return all zeroes for all reads
* without physically allocating disk space. Holes are represented in the
* blkptr_t structure by zeroed blk_dva. Correct checking for holes is
 * done through the BP_IS_HOLE macro. The logical size, level, DMU object
 * type, and birth times are also stored for holes that were written to at
 * some point (i.e. were punched after having been filled).
*/
typedef struct blkptr {
dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
uint64_t blk_prop; /* size, compression, type, etc */
uint64_t blk_pad[2]; /* Extra space for the future */
uint64_t blk_phys_birth; /* txg when block was allocated */
uint64_t blk_birth; /* transaction group at birth */
uint64_t blk_fill; /* fill count */
zio_cksum_t blk_cksum; /* 256-bit checksum */
} blkptr_t;
/*
* Macros to get and set fields in a bp or DVA.
*/
/*
* Note, for gang blocks, DVA_GET_ASIZE() is the total space allocated for
* this gang DVA including its children BP's. The space allocated at this
* DVA's vdev/offset is vdev_gang_header_asize(vdev).
*/
#define DVA_GET_ASIZE(dva) \
BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_ASIZE(dva, x) \
BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8)
#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x)
#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, SPA_VDEVBITS)
#define DVA_SET_VDEV(dva, x) \
BF64_SET((dva)->dva_word[0], 32, SPA_VDEVBITS, x)
#define DVA_GET_OFFSET(dva) \
BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_OFFSET(dva, x) \
BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1)
#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x)
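/*
 * Illustrative sketch (not part of the original header): decoding the first
 * DVA of a concrete (non-embedded, non-hole) bp with the accessors above.
 * Both the offset and the allocated size are returned in bytes.
 *
 *	const dva_t *dva = &bp->blk_dva[0];
 *	uint64_t vdev   = DVA_GET_VDEV(dva);
 *	uint64_t offset = DVA_GET_OFFSET(dva);
 *	uint64_t asize  = DVA_GET_ASIZE(dva);
 */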
#define BP_GET_LSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? \
(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_LSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
} while (0)
#define BP_GET_PSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
BF64_GET_SB((bp)->blk_prop, 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_PSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
} while (0)
#define BP_GET_COMPRESS(bp) \
BF64_GET((bp)->blk_prop, 32, SPA_COMPRESSBITS)
#define BP_SET_COMPRESS(bp, x) \
BF64_SET((bp)->blk_prop, 32, SPA_COMPRESSBITS, x)
#define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1)
#define BP_SET_EMBEDDED(bp, x) BF64_SET((bp)->blk_prop, 39, 1, x)
#define BP_GET_CHECKSUM(bp) \
(BP_IS_EMBEDDED(bp) ? ZIO_CHECKSUM_OFF : \
BF64_GET((bp)->blk_prop, 40, 8))
#define BP_SET_CHECKSUM(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, x); \
} while (0)
#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8)
#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x)
#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5)
#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x)
/* encrypted, authenticated, and MAC cksum bps use the same bit */
#define BP_USES_CRYPT(bp) BF64_GET((bp)->blk_prop, 61, 1)
#define BP_SET_CRYPT(bp, x) BF64_SET((bp)->blk_prop, 61, 1, x)
#define BP_IS_ENCRYPTED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_IS_AUTHENTICATED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
!DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_HAS_INDIRECT_MAC_CKSUM(bp) \
(BP_USES_CRYPT(bp) && BP_GET_LEVEL(bp) > 0)
#define BP_IS_PROTECTED(bp) \
(BP_IS_ENCRYPTED(bp) || BP_IS_AUTHENTICATED(bp))
#define BP_GET_DEDUP(bp) BF64_GET((bp)->blk_prop, 62, 1)
#define BP_SET_DEDUP(bp, x) BF64_SET((bp)->blk_prop, 62, 1, x)
#define BP_GET_BYTEORDER(bp) BF64_GET((bp)->blk_prop, 63, 1)
#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)
#define BP_GET_FREE(bp) BF64_GET((bp)->blk_fill, 0, 1)
#define BP_SET_FREE(bp, x) BF64_SET((bp)->blk_fill, 0, 1, x)
#define BP_PHYSICAL_BIRTH(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
#define BP_SET_BIRTH(bp, logical, physical) \
{ \
ASSERT(!BP_IS_EMBEDDED(bp)); \
(bp)->blk_birth = (logical); \
(bp)->blk_phys_birth = ((logical) == (physical) ? 0 : (physical)); \
}
#define BP_GET_FILL(bp) \
((BP_IS_ENCRYPTED(bp)) ? BF64_GET((bp)->blk_fill, 0, 32) : \
((BP_IS_EMBEDDED(bp)) ? 1 : (bp)->blk_fill))
#define BP_SET_FILL(bp, fill) \
{ \
if (BP_IS_ENCRYPTED(bp)) \
BF64_SET((bp)->blk_fill, 0, 32, fill); \
else \
(bp)->blk_fill = fill; \
}
#define BP_GET_IV2(bp) \
(ASSERT(BP_IS_ENCRYPTED(bp)), \
BF64_GET((bp)->blk_fill, 32, 32))
#define BP_SET_IV2(bp, iv2) \
{ \
ASSERT(BP_IS_ENCRYPTED(bp)); \
BF64_SET((bp)->blk_fill, 32, 32, iv2); \
}
#define BP_IS_METADATA(bp) \
(BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
#define BP_GET_ASIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_GET_UCSIZE(bp) \
(BP_IS_METADATA(bp) ? BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
#define BP_GET_NDVAS(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(!!DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_COUNT_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(DVA_GET_GANG(&(bp)->blk_dva[0]) + \
DVA_GET_GANG(&(bp)->blk_dva[1]) + \
(DVA_GET_GANG(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp))))
#define DVA_EQUAL(dva1, dva2) \
((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
(dva1)->dva_word[0] == (dva2)->dva_word[0])
#define BP_EQUAL(bp1, bp2) \
(BP_PHYSICAL_BIRTH(bp1) == BP_PHYSICAL_BIRTH(bp2) && \
(bp1)->blk_birth == (bp2)->blk_birth && \
DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) && \
DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) && \
DVA_EQUAL(&(bp1)->blk_dva[2], &(bp2)->blk_dva[2]))
#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0)
#define BP_IDENTITY(bp) (ASSERT(!BP_IS_EMBEDDED(bp)), &(bp)->blk_dva[0])
#define BP_IS_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? B_FALSE : DVA_GET_GANG(BP_IDENTITY(bp)))
#define DVA_IS_EMPTY(dva) ((dva)->dva_word[0] == 0ULL && \
(dva)->dva_word[1] == 0ULL)
#define BP_IS_HOLE(bp) \
(!BP_IS_EMBEDDED(bp) && DVA_IS_EMPTY(BP_IDENTITY(bp)))
#define BP_SET_REDACTED(bp) \
{ \
BP_SET_EMBEDDED(bp, B_TRUE); \
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_REDACTED); \
}
#define BP_IS_REDACTED(bp) \
(BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_REDACTED)
/* BP_IS_RAIDZ(bp) assumes no block compression */
#define BP_IS_RAIDZ(bp) (DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \
BP_GET_PSIZE(bp))
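/*
 * Illustrative sketch (not part of the original header): classifying a bp
 * before reading its DVAs, using the predicates above.  Embedded (including
 * redacted) bp's store payload in place of DVAs, and BP_IDENTITY() asserts
 * !BP_IS_EMBEDDED(); holes have all-zero DVAs.
 *
 *	if (BP_IS_EMBEDDED(bp) || BP_IS_HOLE(bp))
 *		return;			(no allocated space to examine)
 *	const dva_t *dva = BP_IDENTITY(bp);
 */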
#define BP_ZERO(bp) \
{ \
(bp)->blk_dva[0].dva_word[0] = 0; \
(bp)->blk_dva[0].dva_word[1] = 0; \
(bp)->blk_dva[1].dva_word[0] = 0; \
(bp)->blk_dva[1].dva_word[1] = 0; \
(bp)->blk_dva[2].dva_word[0] = 0; \
(bp)->blk_dva[2].dva_word[1] = 0; \
(bp)->blk_prop = 0; \
(bp)->blk_pad[0] = 0; \
(bp)->blk_pad[1] = 0; \
(bp)->blk_phys_birth = 0; \
(bp)->blk_birth = 0; \
(bp)->blk_fill = 0; \
ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
}
#ifdef _ZFS_BIG_ENDIAN
#define ZFS_HOST_BYTEORDER (0ULL)
#else
#define ZFS_HOST_BYTEORDER (1ULL)
#endif
#define BP_SHOULD_BYTESWAP(bp) (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER)
#define BP_SPRINTF_LEN 400
/*
* This macro allows code sharing between zfs, libzpool, and mdb.
* 'func' is either snprintf() or mdb_snprintf().
* 'ws' (whitespace) can be ' ' for single-line format, '\n' for multi-line.
*/
#define SNPRINTF_BLKPTR(func, ws, buf, size, bp, type, checksum, compress) \
{ \
static const char *copyname[] = \
{ "zero", "single", "double", "triple" }; \
int len = 0; \
int copies = 0; \
const char *crypt_type; \
if (bp != NULL) { \
if (BP_IS_ENCRYPTED(bp)) { \
crypt_type = "encrypted"; \
/* LINTED E_SUSPICIOUS_COMPARISON */ \
} else if (BP_IS_AUTHENTICATED(bp)) { \
crypt_type = "authenticated"; \
} else if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) { \
crypt_type = "indirect-MAC"; \
} else { \
crypt_type = "unencrypted"; \
} \
} \
if (bp == NULL) { \
len += func(buf + len, size - len, "<NULL>"); \
} else if (BP_IS_HOLE(bp)) { \
len += func(buf + len, size - len, \
"HOLE [L%llu %s] " \
"size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_EMBEDDED(bp)) { \
len = func(buf + len, size - len, \
"EMBEDDED [L%llu %s] et=%u %s " \
"size=%llxL/%llxP birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(int)BPE_GET_ETYPE(bp), \
compress, \
(u_longlong_t)BPE_GET_LSIZE(bp), \
(u_longlong_t)BPE_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_REDACTED(bp)) { \
len += func(buf + len, size - len, \
"REDACTED [L%llu %s] size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else { \
for (int d = 0; d < BP_GET_NDVAS(bp); d++) { \
const dva_t *dva = &bp->blk_dva[d]; \
if (DVA_IS_VALID(dva)) \
copies++; \
len += func(buf + len, size - len, \
"DVA[%d]=<%llu:%llx:%llx>%c", d, \
(u_longlong_t)DVA_GET_VDEV(dva), \
(u_longlong_t)DVA_GET_OFFSET(dva), \
(u_longlong_t)DVA_GET_ASIZE(dva), \
ws); \
} \
if (BP_IS_ENCRYPTED(bp)) { \
len += func(buf + len, size - len, \
"salt=%llx iv=%llx:%llx%c", \
(u_longlong_t)bp->blk_dva[2].dva_word[0], \
(u_longlong_t)bp->blk_dva[2].dva_word[1], \
(u_longlong_t)BP_GET_IV2(bp), \
ws); \
} \
if (BP_IS_GANG(bp) && \
DVA_GET_ASIZE(&bp->blk_dva[2]) <= \
DVA_GET_ASIZE(&bp->blk_dva[1]) / 2) \
copies--; \
len += func(buf + len, size - len, \
"[L%llu %s] %s %s %s %s %s %s %s%c" \
"size=%llxL/%llxP birth=%lluL/%lluP fill=%llu%c" \
"cksum=%llx:%llx:%llx:%llx", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
checksum, \
compress, \
crypt_type, \
BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE", \
BP_IS_GANG(bp) ? "gang" : "contiguous", \
BP_GET_DEDUP(bp) ? "dedup" : "unique", \
copyname[copies], \
ws, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)BP_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth, \
(u_longlong_t)BP_PHYSICAL_BIRTH(bp), \
(u_longlong_t)BP_GET_FILL(bp), \
ws, \
(u_longlong_t)bp->blk_cksum.zc_word[0], \
(u_longlong_t)bp->blk_cksum.zc_word[1], \
(u_longlong_t)bp->blk_cksum.zc_word[2], \
(u_longlong_t)bp->blk_cksum.zc_word[3]); \
} \
ASSERT(len < size); \
}
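/*
 * Illustrative sketch (not part of the original header): instantiating the
 * macro above with snprintf() for a single-line rendering, which mirrors
 * how the snprintf_blkptr() wrapper declared later in this header is
 * expected to use it.  The string arguments are hypothetical.
 *
 *	char buf[BP_SPRINTF_LEN];
 *	SNPRINTF_BLKPTR(snprintf, ' ', buf, sizeof (buf), bp,
 *	    "L0 plain file", "fletcher4", "lz4");
 */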
#define BP_GET_BUFC_TYPE(bp) \
(BP_IS_METADATA(bp) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
typedef enum spa_import_type {
SPA_IMPORT_EXISTING,
SPA_IMPORT_ASSEMBLE
} spa_import_type_t;
typedef enum spa_mode {
SPA_MODE_UNINIT = 0,
SPA_MODE_READ = 1,
SPA_MODE_WRITE = 2,
} spa_mode_t;
/*
* Send TRIM commands in-line during normal pool operation while deleting.
* OFF: no
* ON: yes
* NB: IN_FREEBSD_BASE is defined within the FreeBSD sources.
*/
typedef enum {
SPA_AUTOTRIM_OFF = 0, /* default */
SPA_AUTOTRIM_ON,
#ifdef IN_FREEBSD_BASE
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_ON,
#else
SPA_AUTOTRIM_DEFAULT = SPA_AUTOTRIM_OFF,
#endif
} spa_autotrim_t;
/*
* Reason TRIM command was issued, used internally for accounting purposes.
*/
typedef enum trim_type {
TRIM_TYPE_MANUAL = 0,
TRIM_TYPE_AUTO = 1,
TRIM_TYPE_SIMPLE = 2
} trim_type_t;
/* state manipulation functions */
extern int spa_open(const char *pool, spa_t **, void *tag);
extern int spa_open_rewind(const char *pool, spa_t **, void *tag,
nvlist_t *policy, nvlist_t **config);
extern int spa_get_stats(const char *pool, nvlist_t **config, char *altroot,
size_t buflen);
extern int spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, struct dsl_crypto_params *dcp);
extern int spa_import(char *pool, nvlist_t *config, nvlist_t *props,
uint64_t flags);
extern nvlist_t *spa_tryimport(nvlist_t *tryconfig);
extern int spa_destroy(const char *pool);
extern int spa_checkpoint(const char *pool);
extern int spa_checkpoint_discard(const char *pool);
extern int spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce);
extern int spa_reset(const char *pool);
extern void spa_async_request(spa_t *spa, int flag);
extern void spa_async_unrequest(spa_t *spa, int flag);
extern void spa_async_suspend(spa_t *spa);
extern void spa_async_resume(spa_t *spa);
extern int spa_async_tasks(spa_t *spa);
extern spa_t *spa_inject_addref(char *pool);
extern void spa_inject_delref(spa_t *spa);
extern void spa_scan_stat_init(spa_t *spa);
extern int spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps);
extern int bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
extern int bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
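/*
 * Illustrative sketch (not part of the original header): the usual
 * open/hold pattern for spa_open() above.  FTAG is the conventional holder
 * tag, the pool name is hypothetical, and spa_close() is declared under
 * "Refcount functions" further down.
 *
 *	spa_t *spa;
 *	int err = spa_open("tank", &spa, FTAG);
 *	if (err == 0) {
 *		... use the pool handle ...
 *		spa_close(spa, FTAG);
 *	}
 */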
#define SPA_ASYNC_CONFIG_UPDATE 0x01
#define SPA_ASYNC_REMOVE 0x02
#define SPA_ASYNC_PROBE 0x04
#define SPA_ASYNC_RESILVER_DONE 0x08
#define SPA_ASYNC_RESILVER 0x10
#define SPA_ASYNC_AUTOEXPAND 0x20
#define SPA_ASYNC_REMOVE_DONE 0x40
#define SPA_ASYNC_REMOVE_STOP 0x80
#define SPA_ASYNC_INITIALIZE_RESTART 0x100
#define SPA_ASYNC_TRIM_RESTART 0x200
#define SPA_ASYNC_AUTOTRIM_RESTART 0x400
#define SPA_ASYNC_L2CACHE_REBUILD 0x800
#define SPA_ASYNC_L2CACHE_TRIM 0x1000
#define SPA_ASYNC_REBUILD_DONE 0x2000
/* device manipulation */
extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot);
extern int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot,
int replacing, int rebuild);
extern int spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid,
int replace_done);
extern int spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare);
extern boolean_t spa_vdev_remove_active(spa_t *spa);
extern int spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist);
extern int spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist);
extern int spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath);
extern int spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru);
extern int spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp);
/* spare state (which is global across all pools) */
extern void spa_spare_add(vdev_t *vd);
extern void spa_spare_remove(vdev_t *vd);
extern boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt);
extern void spa_spare_activate(vdev_t *vd);
/* L2ARC state (which is global across all pools) */
extern void spa_l2cache_add(vdev_t *vd);
extern void spa_l2cache_remove(vdev_t *vd);
extern boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool);
extern void spa_l2cache_activate(vdev_t *vd);
extern void spa_l2cache_drop(spa_t *spa);
/* scanning */
extern int spa_scan(spa_t *spa, pool_scan_func_t func);
extern int spa_scan_stop(spa_t *spa);
extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
/* spa syncing */
extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
extern void spa_sync_allpools(void);
extern int zfs_sync_pass_deferred_free;
/* spa namespace global mutex */
extern kmutex_t spa_namespace_lock;
/*
* SPA configuration functions in spa_config.c
*/
#define SPA_CONFIG_UPDATE_POOL 0
#define SPA_CONFIG_UPDATE_VDEVS 1
extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t);
extern void spa_config_load(void);
extern nvlist_t *spa_all_configs(uint64_t *);
extern void spa_config_set(spa_t *spa, nvlist_t *config);
extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
int getstats);
extern void spa_config_update(spa_t *spa, int what);
extern int spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv,
vdev_t *parent, uint_t id, int atype);
/*
* Miscellaneous SPA routines in spa_misc.c
*/
/* Namespace manipulation */
extern spa_t *spa_lookup(const char *name);
extern spa_t *spa_add(const char *name, nvlist_t *config, const char *altroot);
extern void spa_remove(spa_t *spa);
extern spa_t *spa_next(spa_t *prev);
/* Refcount functions */
extern void spa_open_ref(spa_t *spa, void *tag);
extern void spa_close(spa_t *spa, void *tag);
extern void spa_async_close(spa_t *spa, void *tag);
extern boolean_t spa_refcount_zero(spa_t *spa);
#define SCL_NONE 0x00
#define SCL_CONFIG 0x01
#define SCL_STATE 0x02
#define SCL_L2ARC 0x04 /* hack until L2ARC 2.0 */
#define SCL_ALLOC 0x08
#define SCL_ZIO 0x10
#define SCL_FREE 0x20
#define SCL_VDEV 0x40
#define SCL_LOCKS 7
#define SCL_ALL ((1 << SCL_LOCKS) - 1)
#define SCL_STATE_ALL (SCL_STATE | SCL_L2ARC | SCL_ZIO)
/* Historical pool statistics */
typedef struct spa_history_kstat {
kmutex_t lock;
uint64_t count;
uint64_t size;
kstat_t *kstat;
void *priv;
list_t list;
} spa_history_kstat_t;
typedef struct spa_history_list {
uint64_t size;
procfs_list_t procfs_list;
} spa_history_list_t;
typedef struct spa_stats {
spa_history_list_t read_history;
spa_history_list_t txg_history;
spa_history_kstat_t tx_assign_histogram;
spa_history_list_t mmp_history;
spa_history_kstat_t state; /* pool state */
spa_history_kstat_t iostats;
} spa_stats_t;
typedef enum txg_state {
TXG_STATE_BIRTH = 0,
TXG_STATE_OPEN = 1,
TXG_STATE_QUIESCED = 2,
TXG_STATE_WAIT_FOR_SYNC = 3,
TXG_STATE_SYNCED = 4,
TXG_STATE_COMMITTED = 5,
} txg_state_t;
typedef struct txg_stat {
vdev_stat_t vs1;
vdev_stat_t vs2;
uint64_t txg;
uint64_t ndirty;
} txg_stat_t;
/* Assorted pool IO kstats */
typedef struct spa_iostats {
kstat_named_t trim_extents_written;
kstat_named_t trim_bytes_written;
kstat_named_t trim_extents_skipped;
kstat_named_t trim_bytes_skipped;
kstat_named_t trim_extents_failed;
kstat_named_t trim_bytes_failed;
kstat_named_t autotrim_extents_written;
kstat_named_t autotrim_bytes_written;
kstat_named_t autotrim_extents_skipped;
kstat_named_t autotrim_bytes_skipped;
kstat_named_t autotrim_extents_failed;
kstat_named_t autotrim_bytes_failed;
kstat_named_t simple_trim_extents_written;
kstat_named_t simple_trim_bytes_written;
kstat_named_t simple_trim_extents_skipped;
kstat_named_t simple_trim_bytes_skipped;
kstat_named_t simple_trim_extents_failed;
kstat_named_t simple_trim_bytes_failed;
} spa_iostats_t;
extern void spa_stats_init(spa_t *spa);
extern void spa_stats_destroy(spa_t *spa);
extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
uint32_t aflags);
extern void spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time);
extern int spa_txg_history_set(spa_t *spa, uint64_t txg,
txg_state_t completed_state, hrtime_t completed_time);
extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
struct dsl_pool *);
extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
extern int spa_mmp_history_set_skip(spa_t *spa, uint64_t mmp_kstat_id);
extern int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
hrtime_t duration);
extern void spa_mmp_history_add(spa_t *spa, uint64_t txg, uint64_t timestamp,
uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id,
int error);
extern void spa_iostats_trim_add(spa_t *spa, trim_type_t type,
uint64_t extents_written, uint64_t bytes_written,
uint64_t extents_skipped, uint64_t bytes_skipped,
uint64_t extents_failed, uint64_t bytes_failed);
extern void spa_import_progress_add(spa_t *spa);
extern void spa_import_progress_remove(uint64_t spa_guid);
extern int spa_import_progress_set_mmp_check(uint64_t pool_guid,
uint64_t mmp_sec_remaining);
extern int spa_import_progress_set_max_txg(uint64_t pool_guid,
uint64_t max_txg);
extern int spa_import_progress_set_state(uint64_t pool_guid,
spa_load_state_t spa_load_state);
/* Pool configuration locks */
extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, const void *tag, krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, const void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
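/*
 * Illustrative sketch (not part of the original header): taking one of the
 * SCL_* locks defined above as reader around a configuration lookup.
 *
 *	spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
 *	... walk the vdev tree / read the config ...
 *	spa_config_exit(spa, SCL_CONFIG, FTAG);
 */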
/* Pool vdev add/remove lock */
extern uint64_t spa_vdev_enter(spa_t *spa);
extern uint64_t spa_vdev_detach_enter(spa_t *spa, uint64_t guid);
extern uint64_t spa_vdev_config_enter(spa_t *spa);
extern void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg,
int error, char *tag);
extern int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error);
/* Pool vdev state change lock */
extern void spa_vdev_state_enter(spa_t *spa, int oplock);
extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error);
/* Log state */
typedef enum spa_log_state {
SPA_LOG_UNKNOWN = 0, /* unknown log state */
SPA_LOG_MISSING, /* missing log(s) */
SPA_LOG_CLEAR, /* clear the log(s) */
SPA_LOG_GOOD, /* log(s) are good */
} spa_log_state_t;
extern spa_log_state_t spa_get_log_state(spa_t *spa);
extern void spa_set_log_state(spa_t *spa, spa_log_state_t state);
extern int spa_reset_logs(spa_t *spa);
/* Log claim callback */
extern void spa_claim_notify(zio_t *zio);
extern void spa_deadman(void *);
/* Accessor functions */
extern boolean_t spa_shutting_down(spa_t *spa);
extern struct dsl_pool *spa_get_dsl(spa_t *spa);
extern boolean_t spa_is_initializing(spa_t *spa);
extern boolean_t spa_indirect_vdevs_loaded(spa_t *spa);
extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
extern void spa_altroot(spa_t *, char *, size_t);
extern int spa_sync_pass(spa_t *spa);
extern char *spa_name(spa_t *spa);
extern uint64_t spa_guid(spa_t *spa);
extern uint64_t spa_load_guid(spa_t *spa);
extern uint64_t spa_last_synced_txg(spa_t *spa);
extern uint64_t spa_first_txg(spa_t *spa);
extern uint64_t spa_syncing_txg(spa_t *spa);
extern uint64_t spa_final_dirty_txg(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern pool_state_t spa_state(spa_t *spa);
extern spa_load_state_t spa_load_state(spa_t *spa);
extern uint64_t spa_freeze_txg(spa_t *spa);
extern uint64_t spa_get_worst_case_asize(spa_t *spa, uint64_t lsize);
extern uint64_t spa_get_dspace(spa_t *spa);
extern uint64_t spa_get_checkpoint_space(spa_t *spa);
extern uint64_t spa_get_slop_space(spa_t *spa);
extern void spa_update_dspace(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern boolean_t spa_deflate(spa_t *spa);
extern metaslab_class_t *spa_normal_class(spa_t *spa);
extern metaslab_class_t *spa_log_class(spa_t *spa);
extern metaslab_class_t *spa_embedded_log_class(spa_t *spa);
extern metaslab_class_t *spa_special_class(spa_t *spa);
extern metaslab_class_t *spa_dedup_class(spa_t *spa);
extern metaslab_class_t *spa_preferred_class(spa_t *spa, uint64_t size,
dmu_object_type_t objtype, uint_t level, uint_t special_smallblk);
extern void spa_evicting_os_register(spa_t *, objset_t *os);
extern void spa_evicting_os_deregister(spa_t *, objset_t *os);
extern void spa_evicting_os_wait(spa_t *spa);
extern int spa_max_replication(spa_t *spa);
extern int spa_prev_software_version(spa_t *spa);
extern uint64_t spa_get_failmode(spa_t *spa);
extern uint64_t spa_get_deadman_failmode(spa_t *spa);
extern void spa_set_deadman_failmode(spa_t *spa, const char *failmode);
extern boolean_t spa_suspended(spa_t *spa);
extern uint64_t spa_bootfs(spa_t *spa);
extern uint64_t spa_delegation(spa_t *spa);
extern objset_t *spa_meta_objset(spa_t *spa);
extern space_map_t *spa_syncing_log_sm(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
extern uint64_t spa_deadman_ziotime(spa_t *spa);
extern uint64_t spa_dirty_data(spa_t *spa);
extern spa_autotrim_t spa_get_autotrim(spa_t *spa);
/* Miscellaneous support routines */
extern void spa_load_failed(spa_t *spa, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
extern void spa_load_note(spa_t *spa, const char *fmt, ...)
__attribute__((format(printf, 2, 3)));
extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
dmu_tx_t *tx);
extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);
extern spa_t *spa_by_guid(uint64_t pool_guid, uint64_t device_guid);
extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
extern char *spa_strdup(const char *);
extern void spa_strfree(char *);
extern uint64_t spa_generate_guid(spa_t *spa);
extern void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp);
extern void spa_freeze(spa_t *spa);
extern int spa_change_guid(spa_t *spa);
extern void spa_upgrade(spa_t *spa, uint64_t version);
extern void spa_evict_all(void);
extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,
boolean_t l2cache);
extern boolean_t spa_has_spare(spa_t *, uint64_t guid);
extern uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva);
extern uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp);
extern uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp);
extern boolean_t spa_has_slogs(spa_t *spa);
extern boolean_t spa_is_root(spa_t *spa);
extern boolean_t spa_writeable(spa_t *spa);
extern boolean_t spa_has_pending_synctask(spa_t *spa);
extern int spa_maxblocksize(spa_t *spa);
extern int spa_maxdnodesize(spa_t *spa);
extern boolean_t spa_has_checkpoint(spa_t *spa);
extern boolean_t spa_importing_readonly_checkpoint(spa_t *spa);
extern boolean_t spa_suspend_async_destroy(spa_t *spa);
extern uint64_t spa_min_claim_txg(spa_t *spa);
extern boolean_t zfs_dva_valid(spa_t *spa, const dva_t *dva,
const blkptr_t *bp);
typedef void (*spa_remap_cb_t)(uint64_t vdev, uint64_t offset, uint64_t size,
void *arg);
extern boolean_t spa_remap_blkptr(spa_t *spa, blkptr_t *bp,
spa_remap_cb_t callback, void *arg);
extern uint64_t spa_get_last_removal_txg(spa_t *spa);
extern boolean_t spa_trust_config(spa_t *spa);
extern uint64_t spa_missing_tvds_allowed(spa_t *spa);
extern void spa_set_missing_tvds(spa_t *spa, uint64_t missing);
extern boolean_t spa_top_vdevs_spacemap_addressable(spa_t *spa);
extern uint64_t spa_total_metaslabs(spa_t *spa);
extern boolean_t spa_multihost(spa_t *spa);
extern uint32_t spa_get_hostid(spa_t *spa);
extern void spa_activate_allocation_classes(spa_t *, dmu_tx_t *);
extern boolean_t spa_livelist_delete_check(spa_t *spa);
extern spa_mode_t spa_mode(spa_t *spa);
extern uint64_t zfs_strtonum(const char *str, char **nptr);
extern char *spa_his_ievent_table[];
extern void spa_history_create_obj(spa_t *spa, dmu_tx_t *tx);
extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read,
char *his_buf);
extern int spa_history_log(spa_t *spa, const char *his_buf);
extern int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl);
extern void spa_history_log_version(spa_t *spa, const char *operation,
dmu_tx_t *tx);
extern void spa_history_log_internal(spa_t *spa, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_ds(struct dsl_dataset *ds, const char *op,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern void spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
dmu_tx_t *tx, const char *fmt, ...) __printflike(4, 5);
extern const char *spa_state_to_name(spa_t *spa);
/* error handling */
struct zbookmark_phys;
extern void spa_log_error(spa_t *spa, const zbookmark_phys_t *zb);
extern int zfs_ereport_post(const char *clazz, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state);
extern boolean_t zfs_ereport_is_valid(const char *clazz, spa_t *spa, vdev_t *vd,
zio_t *zio);
extern void zfs_ereport_taskq_fini(void);
extern void zfs_ereport_clear(spa_t *spa, vdev_t *vd);
extern nvlist_t *zfs_event_create(spa_t *spa, vdev_t *vd, const char *type,
const char *name, nvlist_t *aux);
extern void zfs_post_remove(spa_t *spa, vdev_t *vd);
extern void zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate);
extern void zfs_post_autoreplace(spa_t *spa, vdev_t *vd);
extern uint64_t spa_get_errlog_size(spa_t *spa);
extern int spa_get_errlog(spa_t *spa, void *uaddr, size_t *count);
extern void spa_errlog_rotate(spa_t *spa);
extern void spa_errlog_drain(spa_t *spa);
extern void spa_errlog_sync(spa_t *spa, uint64_t txg);
extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub);
/* vdev cache */
extern void vdev_cache_stat_init(void);
extern void vdev_cache_stat_fini(void);
/* vdev mirror */
extern void vdev_mirror_stat_init(void);
extern void vdev_mirror_stat_fini(void);
/* Initialization and termination */
extern void spa_init(spa_mode_t mode);
extern void spa_fini(void);
extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
/* asynchronous event notification */
extern void spa_event_notify(spa_t *spa, vdev_t *vdev, nvlist_t *hist_nvl,
const char *name);
+extern void zfs_ereport_zvol_post(const char *subclass, const char *name,
+ const char *device_name, const char *raw_name);
/* waiting for pool activities to complete */
extern int spa_wait(const char *pool, zpool_wait_activity_t activity,
boolean_t *waited);
extern int spa_wait_tag(const char *name, zpool_wait_activity_t activity,
uint64_t tag, boolean_t *waited);
extern void spa_notify_waiters(spa_t *spa);
extern void spa_wake_waiters(spa_t *spa);
/* module param call functions */
int param_set_deadman_ziotime(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_synctime(ZFS_MODULE_PARAM_ARGS);
int param_set_slop_shift(ZFS_MODULE_PARAM_ARGS);
int param_set_deadman_failmode(ZFS_MODULE_PARAM_ARGS);
#ifdef ZFS_DEBUG
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, (bp)); \
dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
} while (0)
#else
#define dprintf_bp(bp, fmt, ...)
#endif
extern spa_mode_t spa_mode_global;
extern int zfs_deadman_enabled;
extern unsigned long zfs_deadman_synctime_ms;
extern unsigned long zfs_deadman_ziotime_ms;
extern unsigned long zfs_deadman_checktime_ms;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_H */
diff --git a/sys/contrib/openzfs/include/sys/zfs_context.h b/sys/contrib/openzfs/include/sys/zfs_context.h
index 80931f98eb97..b1df9f3f3878 100644
--- a/sys/contrib/openzfs/include/sys/zfs_context.h
+++ b/sys/contrib/openzfs/include/sys/zfs_context.h
@@ -1,783 +1,782 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
*/
#ifndef _SYS_ZFS_CONTEXT_H
#define _SYS_ZFS_CONTEXT_H
#ifdef __cplusplus
extern "C" {
#endif
/*
* This code compiles in three different contexts. When __KERNEL__ is defined,
* the code uses "unix-like" kernel interfaces. When _STANDALONE is defined, the
* code is running in a reduced capacity environment of the boot loader which is
* generally a subset of both POSIX and kernel interfaces (with a few unique
* interfaces too). When neither are defined, it's in a userland POSIX or
* similar environment.
*/
#if defined(__KERNEL__) || defined(_STANDALONE)
#include <sys/types.h>
#include <sys/atomic.h>
#include <sys/sysmacros.h>
#include <sys/vmsystm.h>
#include <sys/condvar.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/kmem_cache.h>
#include <sys/vmem.h>
#include <sys/taskq.h>
#include <sys/param.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/random.h>
#include <sys/strings.h>
#include <sys/byteorder.h>
#include <sys/list.h>
#include <sys/time.h>
#include <sys/zone.h>
#include <sys/kstat.h>
#include <sys/zfs_debug.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/zfs_delay.h>
#include <sys/sunddi.h>
#include <sys/ctype.h>
#include <sys/disp.h>
#include <sys/trace.h>
#include <sys/procfs_list.h>
#include <sys/mod.h>
#include <sys/uio_impl.h>
#include <sys/zfs_context_os.h>
#else /* _KERNEL || _STANDALONE */
#define _SYS_MUTEX_H
#define _SYS_RWLOCK_H
#define _SYS_CONDVAR_H
#define _SYS_VNODE_H
#define _SYS_VFS_H
#define _SYS_SUNDDI_H
#define _SYS_CALLB_H
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <stdarg.h>
#include <fcntl.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <strings.h>
#include <pthread.h>
#include <setjmp.h>
#include <assert.h>
#include <umem.h>
#include <limits.h>
#include <atomic.h>
#include <dirent.h>
#include <time.h>
#include <ctype.h>
#include <signal.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/cred.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/byteorder.h>
#include <sys/list.h>
#include <sys/mod.h>
#include <sys/uio.h>
#include <sys/zfs_debug.h>
#include <sys/kstat.h>
#include <sys/u8_textprep.h>
#include <sys/sysevent.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sunddi.h>
#include <sys/debug.h>
#include <sys/utsname.h>
#include <sys/trace_zfs.h>
#include <sys/zfs_context_os.h>
/*
* Stack
*/
#define noinline __attribute__((noinline))
#define likely(x) __builtin_expect((x), 1)
#define unlikely(x) __builtin_expect((x), 0)
/*
* Debugging
*/
/*
* Note that we are not using the debugging levels.
*/
#define CE_CONT 0 /* continuation */
#define CE_NOTE 1 /* notice */
#define CE_WARN 2 /* warning */
#define CE_PANIC 3 /* panic */
#define CE_IGNORE 4 /* print nothing */
/*
* ZFS debugging
*/
extern void dprintf_setup(int *argc, char **argv);
extern void cmn_err(int, const char *, ...);
extern void vcmn_err(int, const char *, va_list);
extern void panic(const char *, ...) __NORETURN;
extern void vpanic(const char *, va_list) __NORETURN;
#define fm_panic panic
/*
* DTrace SDT probes have different signatures in userland than they do in
* the kernel. If they're being used in kernel code, re-define them out of
* existence for their counterparts in libzpool.
*
* Here's an example of how to use the set-error probes in userland:
* zfs$target:::set-error /arg0 == EBUSY/ {stack();}
*
* Here's an example of how to use DTRACE_PROBE probes in userland:
* If there is a probe declared as follows:
* DTRACE_PROBE2(zfs__probe_name, uint64_t, blkid, dnode_t *, dn);
* Then you can use it as follows:
* zfs$target:::probe2 /copyinstr(arg0) == "zfs__probe_name"/
* {printf("%u %p\n", arg1, arg2);}
*/
#ifdef DTRACE_PROBE
#undef DTRACE_PROBE
#endif /* DTRACE_PROBE */
#define DTRACE_PROBE(a)
#ifdef DTRACE_PROBE1
#undef DTRACE_PROBE1
#endif /* DTRACE_PROBE1 */
#define DTRACE_PROBE1(a, b, c)
#ifdef DTRACE_PROBE2
#undef DTRACE_PROBE2
#endif /* DTRACE_PROBE2 */
#define DTRACE_PROBE2(a, b, c, d, e)
#ifdef DTRACE_PROBE3
#undef DTRACE_PROBE3
#endif /* DTRACE_PROBE3 */
#define DTRACE_PROBE3(a, b, c, d, e, f, g)
#ifdef DTRACE_PROBE4
#undef DTRACE_PROBE4
#endif /* DTRACE_PROBE4 */
#define DTRACE_PROBE4(a, b, c, d, e, f, g, h, i)
/*
* Tunables.
*/
typedef struct zfs_kernel_param {
const char *name; /* unused stub */
} zfs_kernel_param_t;
#define ZFS_MODULE_PARAM(scope_prefix, name_prefix, name, type, perm, desc)
#define ZFS_MODULE_PARAM_ARGS void
#define ZFS_MODULE_PARAM_CALL(scope_prefix, name_prefix, name, setfunc, \
getfunc, perm, desc)
/*
* Threads.
*/
typedef pthread_t kthread_t;
#define TS_RUN 0x00000002
#define TS_JOINABLE 0x00000004
#define curthread ((void *)(uintptr_t)pthread_self())
#define kpreempt(x) yield()
#define getcomm() "unknown"
#define thread_create_named(name, stk, stksize, func, arg, len, \
pp, state, pri) \
zk_thread_create(func, arg, stksize, state)
#define thread_create(stk, stksize, func, arg, len, pp, state, pri) \
zk_thread_create(func, arg, stksize, state)
#define thread_exit() pthread_exit(NULL)
#define thread_join(t) pthread_join((pthread_t)(t), NULL)
#define newproc(f, a, cid, pri, ctp, pid) (ENOSYS)
/* in libzpool, p0 exists only to have its address taken */
typedef struct proc {
uintptr_t this_is_never_used_dont_dereference_it;
} proc_t;
extern struct proc p0;
#define curproc (&p0)
#define PS_NONE -1
extern kthread_t *zk_thread_create(void (*func)(void *), void *arg,
size_t stksize, int state);
#define issig(why) (FALSE)
#define ISSIG(thr, why) (FALSE)
#define kpreempt_disable() ((void)0)
#define kpreempt_enable() ((void)0)
#define cond_resched() sched_yield()
/*
* Mutexes
*/
typedef struct kmutex {
pthread_mutex_t m_lock;
pthread_t m_owner;
} kmutex_t;
#define MUTEX_DEFAULT 0
#define MUTEX_NOLOCKDEP MUTEX_DEFAULT
#define MUTEX_HELD(mp) pthread_equal((mp)->m_owner, pthread_self())
#define MUTEX_NOT_HELD(mp) !MUTEX_HELD(mp)
extern void mutex_init(kmutex_t *mp, char *name, int type, void *cookie);
extern void mutex_destroy(kmutex_t *mp);
extern void mutex_enter(kmutex_t *mp);
extern void mutex_exit(kmutex_t *mp);
extern int mutex_tryenter(kmutex_t *mp);
#define NESTED_SINGLE 1
#define mutex_enter_nested(mp, class) mutex_enter(mp)
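/*
 * Illustrative sketch (not part of the original header): the shims above
 * keep the kernel calling convention, so kernel-style locking code
 * compiles unchanged in libzpool.
 *
 *	kmutex_t lock;
 *	mutex_init(&lock, NULL, MUTEX_DEFAULT, NULL);
 *	mutex_enter(&lock);
 *	ASSERT(MUTEX_HELD(&lock));
 *	mutex_exit(&lock);
 *	mutex_destroy(&lock);
 */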
/*
* RW locks
*/
typedef struct krwlock {
pthread_rwlock_t rw_lock;
pthread_t rw_owner;
uint_t rw_readers;
} krwlock_t;
typedef int krw_t;
#define RW_READER 0
#define RW_WRITER 1
#define RW_DEFAULT RW_READER
#define RW_NOLOCKDEP RW_READER
#define RW_READ_HELD(rw) ((rw)->rw_readers > 0)
#define RW_WRITE_HELD(rw) pthread_equal((rw)->rw_owner, pthread_self())
#define RW_LOCK_HELD(rw) (RW_READ_HELD(rw) || RW_WRITE_HELD(rw))
extern void rw_init(krwlock_t *rwlp, char *name, int type, void *arg);
extern void rw_destroy(krwlock_t *rwlp);
extern void rw_enter(krwlock_t *rwlp, krw_t rw);
extern int rw_tryenter(krwlock_t *rwlp, krw_t rw);
extern int rw_tryupgrade(krwlock_t *rwlp);
extern void rw_exit(krwlock_t *rwlp);
#define rw_downgrade(rwlp) do { } while (0)
/*
* Credentials
*/
extern uid_t crgetuid(cred_t *cr);
extern uid_t crgetruid(cred_t *cr);
extern gid_t crgetgid(cred_t *cr);
extern int crgetngroups(cred_t *cr);
extern gid_t *crgetgroups(cred_t *cr);
/*
* Condition variables
*/
typedef pthread_cond_t kcondvar_t;
#define CV_DEFAULT 0
#define CALLOUT_FLAG_ABSOLUTE 0x2
extern void cv_init(kcondvar_t *cv, char *name, int type, void *arg);
extern void cv_destroy(kcondvar_t *cv);
extern void cv_wait(kcondvar_t *cv, kmutex_t *mp);
extern int cv_wait_sig(kcondvar_t *cv, kmutex_t *mp);
extern int cv_timedwait(kcondvar_t *cv, kmutex_t *mp, clock_t abstime);
extern int cv_timedwait_hires(kcondvar_t *cvp, kmutex_t *mp, hrtime_t tim,
hrtime_t res, int flag);
extern void cv_signal(kcondvar_t *cv);
extern void cv_broadcast(kcondvar_t *cv);
#define cv_timedwait_io(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_timedwait_idle(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_timedwait_sig(cv, mp, at) cv_timedwait(cv, mp, at)
#define cv_wait_io(cv, mp) cv_wait(cv, mp)
#define cv_wait_idle(cv, mp) cv_wait(cv, mp)
#define cv_wait_io_sig(cv, mp) cv_wait_sig(cv, mp)
#define cv_timedwait_sig_hires(cv, mp, t, r, f) \
cv_timedwait_hires(cv, mp, t, r, f)
#define cv_timedwait_idle_hires(cv, mp, t, r, f) \
cv_timedwait_hires(cv, mp, t, r, f)
/*
* Thread-specific data
*/
#define tsd_get(k) pthread_getspecific(k)
#define tsd_set(k, v) pthread_setspecific(k, v)
#define tsd_create(kp, d) pthread_key_create((pthread_key_t *)kp, d)
#define tsd_destroy(kp) /* nothing */
#ifdef __FreeBSD__
typedef off_t loff_t;
#endif
/*
* kstat creation, installation and deletion
*/
extern kstat_t *kstat_create(const char *, int,
const char *, const char *, uchar_t, ulong_t, uchar_t);
extern void kstat_install(kstat_t *);
extern void kstat_delete(kstat_t *);
extern void kstat_set_raw_ops(kstat_t *ksp,
int (*headers)(char *buf, size_t size),
int (*data)(char *buf, size_t size, void *data),
void *(*addr)(kstat_t *ksp, loff_t index));
/*
* procfs list manipulation
*/
typedef struct procfs_list {
void *pl_private;
kmutex_t pl_lock;
list_t pl_list;
uint64_t pl_next_id;
size_t pl_node_offset;
} procfs_list_t;
#ifndef __cplusplus
struct seq_file { };
void seq_printf(struct seq_file *m, const char *fmt, ...);
typedef struct procfs_list_node {
list_node_t pln_link;
uint64_t pln_id;
} procfs_list_node_t;
void procfs_list_install(const char *module,
const char *submodule,
const char *name,
mode_t mode,
procfs_list_t *procfs_list,
int (*show)(struct seq_file *f, void *p),
int (*show_header)(struct seq_file *f),
int (*clear)(procfs_list_t *procfs_list),
size_t procfs_list_node_off);
void procfs_list_uninstall(procfs_list_t *procfs_list);
void procfs_list_destroy(procfs_list_t *procfs_list);
void procfs_list_add(procfs_list_t *procfs_list, void *p);
#endif
/*
* Kernel memory
*/
#define KM_SLEEP UMEM_NOFAIL
#define KM_PUSHPAGE KM_SLEEP
#define KM_NOSLEEP UMEM_DEFAULT
#define KM_NORMALPRI 0 /* not needed with UMEM_DEFAULT */
#define KMC_NODEBUG UMC_NODEBUG
#define KMC_KVMEM 0x0
#define kmem_alloc(_s, _f) umem_alloc(_s, _f)
#define kmem_zalloc(_s, _f) umem_zalloc(_s, _f)
#define kmem_free(_b, _s) umem_free(_b, _s)
#define vmem_alloc(_s, _f) kmem_alloc(_s, _f)
#define vmem_zalloc(_s, _f) kmem_zalloc(_s, _f)
#define vmem_free(_b, _s) kmem_free(_b, _s)
#define kmem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i) \
umem_cache_create(_a, _b, _c, _d, _e, _f, _g, _h, _i)
#define kmem_cache_destroy(_c) umem_cache_destroy(_c)
#define kmem_cache_alloc(_c, _f) umem_cache_alloc(_c, _f)
#define kmem_cache_free(_c, _b) umem_cache_free(_c, _b)
#define kmem_debugging() 0
#define kmem_cache_reap_now(_c) umem_cache_reap_now(_c);
#define kmem_cache_set_move(_c, _cb) /* nothing */
#define POINTER_INVALIDATE(_pp) /* nothing */
#define POINTER_IS_VALID(_p) 0
typedef umem_cache_t kmem_cache_t;
typedef enum kmem_cbrc {
KMEM_CBRC_YES,
KMEM_CBRC_NO,
KMEM_CBRC_LATER,
KMEM_CBRC_DONT_NEED,
KMEM_CBRC_DONT_KNOW
} kmem_cbrc_t;
/*
* Task queues
*/
#define TASKQ_NAMELEN 31
typedef uintptr_t taskqid_t;
typedef void (task_func_t)(void *);
typedef struct taskq_ent {
struct taskq_ent *tqent_next;
struct taskq_ent *tqent_prev;
task_func_t *tqent_func;
void *tqent_arg;
uintptr_t tqent_flags;
} taskq_ent_t;
typedef struct taskq {
char tq_name[TASKQ_NAMELEN + 1];
kmutex_t tq_lock;
krwlock_t tq_threadlock;
kcondvar_t tq_dispatch_cv;
kcondvar_t tq_wait_cv;
kthread_t **tq_threadlist;
int tq_flags;
int tq_active;
int tq_nthreads;
int tq_nalloc;
int tq_minalloc;
int tq_maxalloc;
kcondvar_t tq_maxalloc_cv;
int tq_maxalloc_wait;
taskq_ent_t *tq_freelist;
taskq_ent_t tq_task;
} taskq_t;
#define TQENT_FLAG_PREALLOC 0x1 /* taskq_dispatch_ent used */
#define TASKQ_PREPOPULATE 0x0001
#define TASKQ_CPR_SAFE 0x0002 /* Use CPR safe protocol */
#define TASKQ_DYNAMIC 0x0004 /* Use dynamic thread scheduling */
#define TASKQ_THREADS_CPU_PCT 0x0008 /* Scale # threads by # cpus */
#define TASKQ_DC_BATCH 0x0010 /* Mark threads as batch */
#define TQ_SLEEP KM_SLEEP /* Can block for memory */
#define TQ_NOSLEEP KM_NOSLEEP /* cannot block for memory; may fail */
#define TQ_NOQUEUE 0x02 /* Do not enqueue if can't dispatch */
#define TQ_FRONT 0x08 /* Queue in front */
#define TASKQID_INVALID ((taskqid_t)0)
extern taskq_t *system_taskq;
extern taskq_t *system_delay_taskq;
extern taskq_t *taskq_create(const char *, int, pri_t, int, int, uint_t);
#define taskq_create_proc(a, b, c, d, e, p, f) \
(taskq_create(a, b, c, d, e, f))
#define taskq_create_sysdc(a, b, d, e, p, dc, f) \
(taskq_create(a, b, maxclsyspri, d, e, f))
extern taskqid_t taskq_dispatch(taskq_t *, task_func_t, void *, uint_t);
extern taskqid_t taskq_dispatch_delay(taskq_t *, task_func_t, void *, uint_t,
clock_t);
extern void taskq_dispatch_ent(taskq_t *, task_func_t, void *, uint_t,
taskq_ent_t *);
extern int taskq_empty_ent(taskq_ent_t *);
extern void taskq_init_ent(taskq_ent_t *);
extern void taskq_destroy(taskq_t *);
extern void taskq_wait(taskq_t *);
extern void taskq_wait_id(taskq_t *, taskqid_t);
extern void taskq_wait_outstanding(taskq_t *, taskqid_t);
extern int taskq_member(taskq_t *, kthread_t *);
extern taskq_t *taskq_of_curthread(void);
extern int taskq_cancel_id(taskq_t *, taskqid_t);
extern void system_taskq_init(void);
extern void system_taskq_fini(void);
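/*
 * Illustrative sketch (not part of the original header): dispatching work
 * through the userland taskq shim declared above.  The worker function and
 * sizing are hypothetical.
 *
 *	static void my_worker(void *arg) { ... }
 *
 *	taskq_t *tq = taskq_create("example", 4, defclsyspri, 4, INT_MAX,
 *	    TASKQ_PREPOPULATE);
 *	(void) taskq_dispatch(tq, my_worker, NULL, TQ_SLEEP);
 *	taskq_wait(tq);
 *	taskq_destroy(tq);
 */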
#define XVA_MAPSIZE 3
#define XVA_MAGIC 0x78766174
extern char *vn_dumpdir;
#define AV_SCANSTAMP_SZ 32 /* length of anti-virus scanstamp */
typedef struct xoptattr {
inode_timespec_t xoa_createtime; /* Create time of file */
uint8_t xoa_archive;
uint8_t xoa_system;
uint8_t xoa_readonly;
uint8_t xoa_hidden;
uint8_t xoa_nounlink;
uint8_t xoa_immutable;
uint8_t xoa_appendonly;
uint8_t xoa_nodump;
uint8_t xoa_settable;
uint8_t xoa_opaque;
uint8_t xoa_av_quarantined;
uint8_t xoa_av_modified;
uint8_t xoa_av_scanstamp[AV_SCANSTAMP_SZ];
uint8_t xoa_reparse;
uint8_t xoa_offline;
uint8_t xoa_sparse;
} xoptattr_t;
typedef struct vattr {
uint_t va_mask; /* bit-mask of attributes */
u_offset_t va_size; /* file size in bytes */
} vattr_t;
typedef struct xvattr {
vattr_t xva_vattr; /* Embedded vattr structure */
uint32_t xva_magic; /* Magic Number */
uint32_t xva_mapsize; /* Size of attr bitmap (32-bit words) */
uint32_t *xva_rtnattrmapp; /* Ptr to xva_rtnattrmap[] */
uint32_t xva_reqattrmap[XVA_MAPSIZE]; /* Requested attrs */
uint32_t xva_rtnattrmap[XVA_MAPSIZE]; /* Returned attrs */
xoptattr_t xva_xoptattrs; /* Optional attributes */
} xvattr_t;
typedef struct vsecattr {
uint_t vsa_mask; /* See below */
int vsa_aclcnt; /* ACL entry count */
void *vsa_aclentp; /* pointer to ACL entries */
int vsa_dfaclcnt; /* default ACL entry count */
void *vsa_dfaclentp; /* pointer to default ACL entries */
size_t vsa_aclentsz; /* ACE size in bytes of vsa_aclentp */
} vsecattr_t;
#define AT_MODE 0x00002
#define AT_UID 0x00004
#define AT_GID 0x00008
#define AT_FSID 0x00010
#define AT_NODEID 0x00020
#define AT_NLINK 0x00040
#define AT_SIZE 0x00080
#define AT_ATIME 0x00100
#define AT_MTIME 0x00200
#define AT_CTIME 0x00400
#define AT_RDEV 0x00800
#define AT_BLKSIZE 0x01000
#define AT_NBLOCKS 0x02000
#define AT_SEQ 0x08000
#define AT_XVATTR 0x10000
#define CRCREAT 0
#define F_FREESP 11
#define FIGNORECASE 0x80000 /* request case-insensitive lookups */
/*
* Random stuff
*/
#define ddi_get_lbolt() (gethrtime() >> 23)
#define ddi_get_lbolt64() (gethrtime() >> 23)
#define hz 119 /* frequency when using gethrtime() >> 23 for lbolt */
#define ddi_time_before(a, b) (a < b)
#define ddi_time_after(a, b) ddi_time_before(b, a)
#define ddi_time_before_eq(a, b) (!ddi_time_after(a, b))
#define ddi_time_after_eq(a, b) ddi_time_before_eq(b, a)
#define ddi_time_before64(a, b) (a < b)
#define ddi_time_after64(a, b) ddi_time_before64(b, a)
#define ddi_time_before_eq64(a, b) (!ddi_time_after64(a, b))
#define ddi_time_after_eq64(a, b) ddi_time_before_eq64(b, a)
extern void delay(clock_t ticks);
#define SEC_TO_TICK(sec) ((sec) * hz)
#define MSEC_TO_TICK(msec) (howmany((hrtime_t)(msec) * hz, MILLISEC))
#define USEC_TO_TICK(usec) (howmany((hrtime_t)(usec) * hz, MICROSEC))
#define NSEC_TO_TICK(nsec) (howmany((hrtime_t)(nsec) * hz, NANOSEC))
#define max_ncpus 64
#define boot_ncpus (sysconf(_SC_NPROCESSORS_ONLN))
/*
* Process priorities as defined by setpriority(2) and getpriority(2).
*/
#define minclsyspri 19
#define maxclsyspri -20
#define defclsyspri 0
#define CPU_SEQID ((uintptr_t)pthread_self() & (max_ncpus - 1))
#define CPU_SEQID_UNSTABLE CPU_SEQID
#define kcred NULL
#define CRED() NULL
#define ptob(x) ((x) * PAGESIZE)
#define NN_DIVISOR_1000 (1U << 0)
#define NN_NUMBUF_SZ (6)
extern uint64_t physmem;
extern const char *random_path;
extern const char *urandom_path;
extern int highbit64(uint64_t i);
extern int lowbit64(uint64_t i);
extern int random_get_bytes(uint8_t *ptr, size_t len);
extern int random_get_pseudo_bytes(uint8_t *ptr, size_t len);
static __inline__ uint32_t
random_in_range(uint32_t range)
{
uint32_t r;
ASSERT(range != 0);
if (range == 1)
return (0);
(void) random_get_pseudo_bytes((uint8_t *)&r, sizeof (r));
return (r % range);
}
extern void kernel_init(int mode);
extern void kernel_fini(void);
extern void random_init(void);
extern void random_fini(void);
struct spa;
extern void show_pool_stats(struct spa *);
extern int set_global_var(char const *arg);
typedef struct callb_cpr {
kmutex_t *cc_lockp;
} callb_cpr_t;
#define CALLB_CPR_INIT(cp, lockp, func, name) { \
(cp)->cc_lockp = lockp; \
}
#define CALLB_CPR_SAFE_BEGIN(cp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
}
#define CALLB_CPR_SAFE_END(cp, lockp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
}
#define CALLB_CPR_EXIT(cp) { \
ASSERT(MUTEX_HELD((cp)->cc_lockp)); \
mutex_exit((cp)->cc_lockp); \
}
#define zone_dataset_visible(x, y) (1)
#define INGLOBALZONE(z) (1)
extern uint32_t zone_get_hostid(void *zonep);
extern char *kmem_vasprintf(const char *fmt, va_list adx);
extern char *kmem_asprintf(const char *fmt, ...);
#define kmem_strfree(str) kmem_free((str), strlen(str) + 1)
#define kmem_strdup(s) strdup(s)
/*
* Hostname information
*/
extern char hw_serial[]; /* for userland-emulated hostid access */
extern int ddi_strtoul(const char *str, char **nptr, int base,
unsigned long *result);
extern int ddi_strtoull(const char *str, char **nptr, int base,
u_longlong_t *result);
typedef struct utsname utsname_t;
extern utsname_t *utsname(void);
/* ZFS Boot Related stuff. */
struct _buf {
intptr_t _fd;
};
struct bootstat {
uint64_t st_size;
};
typedef struct ace_object {
uid_t a_who;
uint32_t a_access_mask;
uint16_t a_flags;
uint16_t a_type;
uint8_t a_obj_type[16];
uint8_t a_inherit_obj_type[16];
} ace_object_t;
#define ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE 0x05
#define ACE_ACCESS_DENIED_OBJECT_ACE_TYPE 0x06
#define ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE 0x07
#define ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE 0x08
extern int zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr);
extern int zfs_secpolicy_rename_perms(const char *from, const char *to,
cred_t *cr);
extern int zfs_secpolicy_destroy_perms(const char *name, cred_t *cr);
extern int secpolicy_zfs(const cred_t *cr);
extern int secpolicy_zfs_proc(const cred_t *cr, proc_t *proc);
extern zoneid_t getzoneid(void);
/* SID stuff */
typedef struct ksiddomain {
uint_t kd_ref;
uint_t kd_len;
char *kd_name;
} ksiddomain_t;
ksiddomain_t *ksid_lookupdomain(const char *);
void ksiddomain_rele(ksiddomain_t *);
#define DDI_SLEEP KM_SLEEP
#define ddi_log_sysevent(_a, _b, _c, _d, _e, _f, _g) \
sysevent_post_event(_c, _d, _b, "libzpool", _e, _f)
#define zfs_sleep_until(wakeup) \
do { \
hrtime_t delta = wakeup - gethrtime(); \
struct timespec ts; \
ts.tv_sec = delta / NANOSEC; \
ts.tv_nsec = delta % NANOSEC; \
(void) nanosleep(&ts, NULL); \
} while (0)
typedef int fstrans_cookie_t;
extern fstrans_cookie_t spl_fstrans_mark(void);
extern void spl_fstrans_unmark(fstrans_cookie_t);
extern int __spl_pf_fstrans_check(void);
extern int kmem_cache_reap_active(void);
-#define ____cacheline_aligned
/*
* Kernel modules
*/
#define __init
#define __exit
#endif /* _KERNEL || _STANDALONE */
#ifdef __cplusplus
};
#endif
#endif /* _SYS_ZFS_CONTEXT_H */
diff --git a/sys/contrib/openzfs/include/sys/zio.h b/sys/contrib/openzfs/include/sys/zio.h
index 2d34481f6be6..5b606eaf8d50 100644
--- a/sys/contrib/openzfs/include/sys/zio.h
+++ b/sys/contrib/openzfs/include/sys/zio.h
@@ -1,690 +1,692 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019-2020, Michael Niewöhner
*/
#ifndef _ZIO_H
#define _ZIO_H
#include <sys/zio_priority.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/fs/zfs.h>
#include <sys/zio_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Embedded checksum
*/
#define ZEC_MAGIC 0x210da7ab10c7a11ULL
typedef struct zio_eck {
uint64_t zec_magic; /* for validation, endianness */
zio_cksum_t zec_cksum; /* 256-bit checksum */
} zio_eck_t;
/*
* Gang block headers are self-checksumming and contain an array
* of block pointers.
*/
#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE
#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t) - \
(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
sizeof (uint64_t))
typedef struct zio_gbh {
blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS];
uint64_t zg_filler[SPA_GBH_FILLER];
zio_eck_t zg_tail;
} zio_gbh_phys_t;
enum zio_checksum {
ZIO_CHECKSUM_INHERIT = 0,
ZIO_CHECKSUM_ON,
ZIO_CHECKSUM_OFF,
ZIO_CHECKSUM_LABEL,
ZIO_CHECKSUM_GANG_HEADER,
ZIO_CHECKSUM_ZILOG,
ZIO_CHECKSUM_FLETCHER_2,
ZIO_CHECKSUM_FLETCHER_4,
ZIO_CHECKSUM_SHA256,
ZIO_CHECKSUM_ZILOG2,
ZIO_CHECKSUM_NOPARITY,
ZIO_CHECKSUM_SHA512,
ZIO_CHECKSUM_SKEIN,
#if !defined(__FreeBSD__)
ZIO_CHECKSUM_EDONR,
#endif
ZIO_CHECKSUM_FUNCTIONS
};
/*
* The number of "legacy" checksum functions which can be set on individual
* objects.
*/
#define ZIO_CHECKSUM_LEGACY_FUNCTIONS ZIO_CHECKSUM_ZILOG2
#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_4
#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON
#define ZIO_CHECKSUM_MASK 0xffULL
#define ZIO_CHECKSUM_VERIFY (1 << 8)
#define ZIO_DEDUPCHECKSUM ZIO_CHECKSUM_SHA256
/* macros defining encryption lengths */
#define ZIO_OBJSET_MAC_LEN 32
#define ZIO_DATA_IV_LEN 12
#define ZIO_DATA_SALT_LEN 8
#define ZIO_DATA_MAC_LEN 16
/*
* The number of "legacy" compression functions which can be set on individual
* objects.
*/
#define ZIO_COMPRESS_LEGACY_FUNCTIONS ZIO_COMPRESS_LZ4
/*
* The meaning of "compress = on" selected by the compression features enabled
* on a given pool.
*/
#define ZIO_COMPRESS_LEGACY_ON_VALUE ZIO_COMPRESS_LZJB
#define ZIO_COMPRESS_LZ4_ON_VALUE ZIO_COMPRESS_LZ4
#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_OFF
#define BOOTFS_COMPRESS_VALID(compress) \
((compress) == ZIO_COMPRESS_LZJB || \
(compress) == ZIO_COMPRESS_LZ4 || \
(compress) == ZIO_COMPRESS_GZIP_1 || \
(compress) == ZIO_COMPRESS_GZIP_2 || \
(compress) == ZIO_COMPRESS_GZIP_3 || \
(compress) == ZIO_COMPRESS_GZIP_4 || \
(compress) == ZIO_COMPRESS_GZIP_5 || \
(compress) == ZIO_COMPRESS_GZIP_6 || \
(compress) == ZIO_COMPRESS_GZIP_7 || \
(compress) == ZIO_COMPRESS_GZIP_8 || \
(compress) == ZIO_COMPRESS_GZIP_9 || \
(compress) == ZIO_COMPRESS_ZLE || \
(compress) == ZIO_COMPRESS_ZSTD || \
(compress) == ZIO_COMPRESS_ON || \
(compress) == ZIO_COMPRESS_OFF)
#define ZIO_COMPRESS_ALGO(x) (x & SPA_COMPRESSMASK)
#define ZIO_COMPRESS_LEVEL(x) ((x & ~SPA_COMPRESSMASK) >> SPA_COMPRESSBITS)
#define ZIO_COMPRESS_RAW(type, level) (type | ((level) << SPA_COMPRESSBITS))
#define ZIO_COMPLEVEL_ZSTD(level) \
ZIO_COMPRESS_RAW(ZIO_COMPRESS_ZSTD, level)
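/*
 * Illustrative sketch (not part of the original header): packing and
 * unpacking an algorithm/level pair with the macros above, e.g. zstd
 * level 19.
 *
 *	uint64_t cv = ZIO_COMPLEVEL_ZSTD(19);
 *	ASSERT3U(ZIO_COMPRESS_ALGO(cv), ==, ZIO_COMPRESS_ZSTD);
 *	ASSERT3U(ZIO_COMPRESS_LEVEL(cv), ==, 19);
 */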
#define ZIO_FAILURE_MODE_WAIT 0
#define ZIO_FAILURE_MODE_CONTINUE 1
#define ZIO_FAILURE_MODE_PANIC 2
typedef enum zio_suspend_reason {
ZIO_SUSPEND_NONE = 0,
ZIO_SUSPEND_IOERR,
ZIO_SUSPEND_MMP,
} zio_suspend_reason_t;
enum zio_flag {
/*
* Flags inherited by gang, ddt, and vdev children,
* and that must be equal for two zios to aggregate
*/
ZIO_FLAG_DONT_AGGREGATE = 1 << 0,
ZIO_FLAG_IO_REPAIR = 1 << 1,
ZIO_FLAG_SELF_HEAL = 1 << 2,
ZIO_FLAG_RESILVER = 1 << 3,
ZIO_FLAG_SCRUB = 1 << 4,
ZIO_FLAG_SCAN_THREAD = 1 << 5,
ZIO_FLAG_PHYSICAL = 1 << 6,
#define ZIO_FLAG_AGG_INHERIT (ZIO_FLAG_CANFAIL - 1)
/*
* Flags inherited by ddt, gang, and vdev children.
*/
ZIO_FLAG_CANFAIL = 1 << 7, /* must be first for INHERIT */
ZIO_FLAG_SPECULATIVE = 1 << 8,
ZIO_FLAG_CONFIG_WRITER = 1 << 9,
ZIO_FLAG_DONT_RETRY = 1 << 10,
ZIO_FLAG_DONT_CACHE = 1 << 11,
ZIO_FLAG_NODATA = 1 << 12,
ZIO_FLAG_INDUCE_DAMAGE = 1 << 13,
ZIO_FLAG_IO_ALLOCATING = 1 << 14,
#define ZIO_FLAG_DDT_INHERIT (ZIO_FLAG_IO_RETRY - 1)
#define ZIO_FLAG_GANG_INHERIT (ZIO_FLAG_IO_RETRY - 1)
/*
* Flags inherited by vdev children.
*/
ZIO_FLAG_IO_RETRY = 1 << 15, /* must be first for INHERIT */
ZIO_FLAG_PROBE = 1 << 16,
ZIO_FLAG_TRYHARD = 1 << 17,
ZIO_FLAG_OPTIONAL = 1 << 18,
#define ZIO_FLAG_VDEV_INHERIT (ZIO_FLAG_DONT_QUEUE - 1)
/*
* Flags not inherited by any children.
*/
ZIO_FLAG_DONT_QUEUE = 1 << 19, /* must be first for INHERIT */
ZIO_FLAG_DONT_PROPAGATE = 1 << 20,
ZIO_FLAG_IO_BYPASS = 1 << 21,
ZIO_FLAG_IO_REWRITE = 1 << 22,
ZIO_FLAG_RAW_COMPRESS = 1 << 23,
ZIO_FLAG_RAW_ENCRYPT = 1 << 24,
ZIO_FLAG_GANG_CHILD = 1 << 25,
ZIO_FLAG_DDT_CHILD = 1 << 26,
ZIO_FLAG_GODFATHER = 1 << 27,
ZIO_FLAG_NOPWRITE = 1 << 28,
ZIO_FLAG_REEXECUTED = 1 << 29,
ZIO_FLAG_DELEGATED = 1 << 30,
ZIO_FLAG_FASTWRITE = 1 << 31,
};
#define ZIO_FLAG_MUSTSUCCEED 0
#define ZIO_FLAG_RAW (ZIO_FLAG_RAW_COMPRESS | ZIO_FLAG_RAW_ENCRYPT)
#define ZIO_DDT_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_DDT_INHERIT) | \
ZIO_FLAG_DDT_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_GANG_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) | \
ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_VDEV_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) | \
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_CANFAIL)
#define ZIO_CHILD_BIT(x) (1 << (x))
#define ZIO_CHILD_BIT_IS_SET(val, x) ((val) & (1 << (x)))
enum zio_child {
ZIO_CHILD_VDEV = 0,
ZIO_CHILD_GANG,
ZIO_CHILD_DDT,
ZIO_CHILD_LOGICAL,
ZIO_CHILD_TYPES
};
#define ZIO_CHILD_VDEV_BIT ZIO_CHILD_BIT(ZIO_CHILD_VDEV)
#define ZIO_CHILD_GANG_BIT ZIO_CHILD_BIT(ZIO_CHILD_GANG)
#define ZIO_CHILD_DDT_BIT ZIO_CHILD_BIT(ZIO_CHILD_DDT)
#define ZIO_CHILD_LOGICAL_BIT ZIO_CHILD_BIT(ZIO_CHILD_LOGICAL)
#define ZIO_CHILD_ALL_BITS \
(ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
ZIO_CHILD_DDT_BIT | ZIO_CHILD_LOGICAL_BIT)
enum zio_wait_type {
ZIO_WAIT_READY = 0,
ZIO_WAIT_DONE,
ZIO_WAIT_TYPES
};
typedef void zio_done_func_t(zio_t *zio);
extern int zio_exclude_metadata;
extern int zio_dva_throttle_enabled;
extern const char *zio_type_name[ZIO_TYPES];
/*
* A bookmark is a four-tuple <objset, object, level, blkid> that uniquely
* identifies any block in the pool. By convention, the meta-objset (MOS)
* is objset 0, and the meta-dnode is object 0. This covers all blocks
* except root blocks and ZIL blocks, which are defined as follows:
*
* Root blocks (objset_phys_t) are object 0, level -1: <objset, 0, -1, 0>.
* ZIL blocks are bookmarked <objset, 0, -2, blkid == ZIL sequence number>.
* dmu_sync()ed ZIL data blocks are bookmarked <objset, object, -2, blkid>.
* dnode visit bookmarks are <objset, object id of dnode, -3, 0>.
*
* Note: this structure is called a bookmark because its original purpose
* was to remember where to resume a pool-wide traverse.
*
* Note: this structure is passed between userland and the kernel, and is
* stored on disk (by virtue of being incorporated into other on-disk
* structures, e.g. dsl_scan_phys_t).
*/
struct zbookmark_phys {
uint64_t zb_objset;
uint64_t zb_object;
int64_t zb_level;
uint64_t zb_blkid;
};
#define SET_BOOKMARK(zb, objset, object, level, blkid) \
{ \
(zb)->zb_objset = objset; \
(zb)->zb_object = object; \
(zb)->zb_level = level; \
(zb)->zb_blkid = blkid; \
}
#define ZB_DESTROYED_OBJSET (-1ULL)
#define ZB_ROOT_OBJECT (0ULL)
#define ZB_ROOT_LEVEL (-1LL)
#define ZB_ROOT_BLKID (0ULL)
#define ZB_ZIL_OBJECT (0ULL)
#define ZB_ZIL_LEVEL (-2LL)
#define ZB_DNODE_LEVEL (-3LL)
#define ZB_DNODE_BLKID (0ULL)
#define ZB_IS_ZERO(zb) \
((zb)->zb_objset == 0 && (zb)->zb_object == 0 && \
(zb)->zb_level == 0 && (zb)->zb_blkid == 0)
#define ZB_IS_ROOT(zb) \
((zb)->zb_object == ZB_ROOT_OBJECT && \
(zb)->zb_level == ZB_ROOT_LEVEL && \
(zb)->zb_blkid == ZB_ROOT_BLKID)
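/*
 * Illustrative sketch (not part of the original header): per the comment
 * above, an objset's root block is bookmarked <objset, 0, -1, 0>, which can
 * be expressed with the constants defined here:
 *
 *	zbookmark_phys_t zb;
 *	SET_BOOKMARK(&zb, dmu_objset_id(os), ZB_ROOT_OBJECT,
 *	    ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
 *	ASSERT(ZB_IS_ROOT(&zb));
 *
 * (dmu_objset_id(os) is assumed here only as a stand-in for "the objset
 * number"; any uint64_t objset id works.)
 */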
typedef struct zio_prop {
enum zio_checksum zp_checksum;
enum zio_compress zp_compress;
uint8_t zp_complevel;
dmu_object_type_t zp_type;
uint8_t zp_level;
uint8_t zp_copies;
boolean_t zp_dedup;
boolean_t zp_dedup_verify;
boolean_t zp_nopwrite;
boolean_t zp_encrypt;
boolean_t zp_byteorder;
uint8_t zp_salt[ZIO_DATA_SALT_LEN];
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
uint32_t zp_zpl_smallblk;
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;
typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
const abd_t *good_data);
typedef void zio_cksum_free_f(void *cbdata, size_t size);
struct zio_bad_cksum; /* defined in zio_checksum.h */
struct dnode_phys;
struct abd;
struct zio_cksum_report {
struct zio_cksum_report *zcr_next;
nvlist_t *zcr_ereport;
nvlist_t *zcr_detector;
void *zcr_cbdata;
size_t zcr_cbinfo; /* passed to zcr_free() */
uint64_t zcr_sector;
uint64_t zcr_align;
uint64_t zcr_length;
zio_cksum_finish_f *zcr_finish;
zio_cksum_free_f *zcr_free;
/* internal use only */
struct zio_bad_cksum *zcr_ckinfo; /* information from failure */
};
typedef struct zio_vsd_ops {
zio_done_func_t *vsd_free;
} zio_vsd_ops_t;
typedef struct zio_gang_node {
zio_gbh_phys_t *gn_gbh;
struct zio_gang_node *gn_child[SPA_GBH_NBLKPTRS];
} zio_gang_node_t;
typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp,
zio_gang_node_t *gn, struct abd *data, uint64_t offset);
typedef void zio_transform_func_t(zio_t *zio, struct abd *data, uint64_t size);
typedef struct zio_transform {
struct abd *zt_orig_abd;
uint64_t zt_orig_size;
uint64_t zt_bufsize;
zio_transform_func_t *zt_transform;
struct zio_transform *zt_next;
} zio_transform_t;
typedef zio_t *zio_pipe_stage_t(zio_t *zio);
/*
* The io_reexecute flags are distinct from io_flags because the child must
* be able to propagate them to the parent. The normal io_flags are local
* to the zio, not protected by any lock, and not modifiable by children;
* the reexecute flags are protected by io_lock, modifiable by children,
* and always propagated -- even when ZIO_FLAG_DONT_PROPAGATE is set.
*/
#define ZIO_REEXECUTE_NOW 0x01
#define ZIO_REEXECUTE_SUSPEND 0x02
/*
* The io_trim flags are used to specify the type of TRIM to perform. They
only apply to ZIO_TYPE_TRIM zios and are distinct from io_flags.
*/
enum trim_flag {
ZIO_TRIM_SECURE = 1 << 0,
};
typedef struct zio_alloc_list {
list_t zal_list;
uint64_t zal_size;
} zio_alloc_list_t;
typedef struct zio_link {
zio_t *zl_parent;
zio_t *zl_child;
list_node_t zl_parent_node;
list_node_t zl_child_node;
} zio_link_t;
struct zio {
/* Core information about this I/O */
zbookmark_phys_t io_bookmark;
zio_prop_t io_prop;
zio_type_t io_type;
enum zio_child io_child_type;
enum trim_flag io_trim_flags;
int io_cmd;
zio_priority_t io_priority;
uint8_t io_reexecute;
uint8_t io_state[ZIO_WAIT_TYPES];
uint64_t io_txg;
spa_t *io_spa;
blkptr_t *io_bp;
blkptr_t *io_bp_override;
blkptr_t io_bp_copy;
list_t io_parent_list;
list_t io_child_list;
zio_t *io_logical;
zio_transform_t *io_transform_stack;
/* Callback info */
zio_done_func_t *io_ready;
zio_done_func_t *io_children_ready;
zio_done_func_t *io_physdone;
zio_done_func_t *io_done;
void *io_private;
int64_t io_prev_space_delta; /* DMU private */
blkptr_t io_bp_orig;
/* io_lsize != io_orig_size iff this is a raw write */
uint64_t io_lsize;
/* Data represented by this I/O */
struct abd *io_abd;
struct abd *io_orig_abd;
uint64_t io_size;
uint64_t io_orig_size;
/* Stuff for the vdev stack */
vdev_t *io_vd;
void *io_vsd;
const zio_vsd_ops_t *io_vsd_ops;
metaslab_class_t *io_metaslab_class; /* dva throttle class */
uint64_t io_offset;
hrtime_t io_timestamp; /* submitted at */
hrtime_t io_queued_timestamp;
hrtime_t io_target_timestamp;
hrtime_t io_delta; /* vdev queue service delta */
hrtime_t io_delay; /* Device access time (disk or */
/* file). */
avl_node_t io_queue_node;
avl_node_t io_offset_node;
avl_node_t io_alloc_node;
zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */
enum zio_flag io_flags;
enum zio_stage io_stage;
enum zio_stage io_pipeline;
enum zio_flag io_orig_flags;
enum zio_stage io_orig_stage;
enum zio_stage io_orig_pipeline;
enum zio_stage io_pipeline_trace;
int io_error;
int io_child_error[ZIO_CHILD_TYPES];
uint64_t io_children[ZIO_CHILD_TYPES][ZIO_WAIT_TYPES];
uint64_t io_child_count;
uint64_t io_phys_children;
uint64_t io_parent_count;
uint64_t *io_stall;
zio_t *io_gang_leader;
zio_gang_node_t *io_gang_tree;
void *io_executor;
void *io_waiter;
void *io_bio;
kmutex_t io_lock;
kcondvar_t io_cv;
int io_allocator;
/* FMA state */
zio_cksum_report_t *io_cksum_report;
uint64_t io_ena;
/* Taskq dispatching state */
taskq_ent_t io_tqent;
};
enum blk_verify_flag {
BLK_VERIFY_ONLY,
BLK_VERIFY_LOG,
BLK_VERIFY_HALT
};
extern int zio_bookmark_compare(const void *, const void *);
extern zio_t *zio_null(zio_t *pio, spa_t *spa, vdev_t *vd,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_root(spa_t *spa,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
struct abd *data, uint64_t lsize, zio_done_func_t *done, void *priv,
zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb);
extern zio_t *zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *priv, zio_priority_t priority, enum zio_flag flags,
const zbookmark_phys_t *zb);
extern zio_t *zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, zio_done_func_t *done, void *priv,
zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb);
extern void zio_write_override(zio_t *zio, blkptr_t *bp, int copies,
boolean_t nopwrite);
extern void zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp);
extern zio_t *zio_claim(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *priv, enum zio_flag flags);
extern zio_t *zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *priv, zio_priority_t priority,
enum zio_flag flags, enum trim_flag trim_flags);
extern zio_t *zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
enum zio_flag flags, boolean_t labels);
extern zio_t *zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *priv, zio_priority_t priority,
enum zio_flag flags, boolean_t labels);
extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp, enum zio_flag flags);
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
blkptr_t *new_bp, uint64_t size, boolean_t *slog);
extern void zio_flush(zio_t *zio, vdev_t *vd);
extern void zio_shrink(zio_t *zio, uint64_t size);
extern int zio_wait(zio_t *zio);
extern void zio_nowait(zio_t *zio);
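/*
 * Illustrative sketch (not part of the original header): a common pattern
 * with the functions declared above is to hang asynchronous child reads off
 * a root zio and wait once for all of them.  done_cb/cb_arg are hypothetical
 * caller-supplied callback and argument; ZIO_PRIORITY_ASYNC_READ and
 * BP_GET_LSIZE() are assumed to come from zio_priority.h and spa.h:
 *
 *	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
 *	zio_nowait(zio_read(rio, spa, bp, abd, BP_GET_LSIZE(bp),
 *	    done_cb, cb_arg, ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, zb));
 *	int err = zio_wait(rio);	// waits for all children of rio
 */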
extern void zio_execute(void *zio);
extern void zio_interrupt(void *zio);
extern void zio_delay_init(zio_t *zio);
extern void zio_delay_interrupt(zio_t *zio);
extern void zio_deadman(zio_t *zio, char *tag);
extern zio_t *zio_walk_parents(zio_t *cio, zio_link_t **);
extern zio_t *zio_walk_children(zio_t *pio, zio_link_t **);
extern zio_t *zio_unique_parent(zio_t *cio);
extern void zio_add_child(zio_t *pio, zio_t *cio);
extern void *zio_buf_alloc(size_t size);
extern void zio_buf_free(void *buf, size_t size);
extern void *zio_data_buf_alloc(size_t size);
extern void zio_data_buf_free(void *buf, size_t size);
extern void zio_push_transform(zio_t *zio, struct abd *abd, uint64_t size,
uint64_t bufsize, zio_transform_func_t *transform);
extern void zio_pop_transforms(zio_t *zio);
extern void zio_resubmit_stage_async(void *);
extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd,
uint64_t offset, struct abd *data, uint64_t size, int type,
zio_priority_t priority, enum zio_flag flags,
zio_done_func_t *done, void *priv);
extern zio_t *zio_vdev_delegated_io(vdev_t *vd, uint64_t offset,
struct abd *data, uint64_t size, zio_type_t type, zio_priority_t priority,
enum zio_flag flags, zio_done_func_t *done, void *priv);
extern void zio_vdev_io_bypass(zio_t *zio);
extern void zio_vdev_io_reissue(zio_t *zio);
extern void zio_vdev_io_redone(zio_t *zio);
extern void zio_change_priority(zio_t *pio, zio_priority_t priority);
extern void zio_checksum_verified(zio_t *zio);
extern int zio_worst_error(int e1, int e2);
extern enum zio_checksum zio_checksum_select(enum zio_checksum child,
enum zio_checksum parent);
extern enum zio_checksum zio_checksum_dedup_select(spa_t *spa,
enum zio_checksum child, enum zio_checksum parent);
extern enum zio_compress zio_compress_select(spa_t *spa,
enum zio_compress child, enum zio_compress parent);
extern uint8_t zio_complevel_select(spa_t *spa, enum zio_compress compress,
uint8_t child, uint8_t parent);
extern void zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t);
extern int zio_resume(spa_t *spa);
extern void zio_resume_wait(spa_t *spa);
extern boolean_t zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp,
boolean_t config_held, enum blk_verify_flag blk_verify);
/*
* Initial setup and teardown.
*/
extern void zio_init(void);
extern void zio_fini(void);
/*
* Fault injection
*/
struct zinject_record;
extern uint32_t zio_injection_enabled;
extern int zio_inject_fault(char *name, int flags, int *id,
struct zinject_record *record);
extern int zio_inject_list_next(int *id, char *name, size_t buflen,
struct zinject_record *record);
extern int zio_clear_fault(int id);
extern void zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type);
extern int zio_handle_decrypt_injection(spa_t *spa, const zbookmark_phys_t *zb,
uint64_t type, int error);
extern int zio_handle_fault_injection(zio_t *zio, int error);
extern int zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error);
extern int zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1,
int err2);
extern int zio_handle_label_injection(zio_t *zio, int error);
extern void zio_handle_ignored_writes(zio_t *zio);
extern hrtime_t zio_handle_io_delay(zio_t *zio);
/*
* Checksum ereport functions
*/
extern int zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, struct zio_bad_cksum *info);
extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
const abd_t *good_data, const abd_t *bad_data, boolean_t drop_if_identical);
extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
/* If we have the good data in hand, this function can be used */
extern int zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, const abd_t *good_data, const abd_t *bad_data,
struct zio_bad_cksum *info);
void zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr);
+extern void zfs_ereport_snapshot_post(const char *subclass, spa_t *spa,
+ const char *name);
/* Called from spa_sync(), but primarily an injection handler */
extern void spa_handle_ignored_writes(spa_t *spa);
/* zbookmark_phys functions */
boolean_t zbookmark_subtree_completed(const struct dnode_phys *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
int zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2,
uint8_t ibs2, const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2);
#ifdef __cplusplus
}
#endif
#endif /* _ZIO_H */
diff --git a/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi b/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
index f0845c796342..b44f0c9eb2ec 100644
--- a/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
+++ b/sys/contrib/openzfs/lib/libnvpair/libnvpair.abi
@@ -1,3191 +1,2942 @@
<abi-corpus architecture='elf-amd-x86_64' soname='libnvpair.so.3'>
<elf-needed>
<dependency name='libc.so.6'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dump_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_add_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_lookup_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_merge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_num_pairs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_pack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_pack_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_remove_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvlist_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fnvpair_value_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_reset' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_add_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nv_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_nvpair_embedded_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_pairs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_lookup_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_merge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_next_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_nvflag' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_pack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prev_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_print_json' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_alloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_dofmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_doindent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_getdest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_setdest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_setfmt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctl_setindent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_boolean' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_prtctlop_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_remove_all' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_remove_nvpair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xalloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvlist_xunpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_type_is_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_boolean_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_boolean_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_byte' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_byte_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_double' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_hrtime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_int8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_match' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_match_regex' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_nvlist_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_string_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint16_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint32_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint64_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nvpair_value_uint8_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='libspl_assert_ok' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_alloc_nosleep' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='nv_fixed_ops' size='8' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='libnvpair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
- <type-decl name='int' size-in-bits='32' id='type-id-1'/>
- <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-2'>
+ <abi-instr version='1.0' address-size='64' path='../../module/nvpair/fnvpair.c' language='LANG_C99'>
+ <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_nvlist'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_string' mangled-name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_string'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='26a90f95'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint64'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='9c313c2d'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_uint32' mangled-name='fnvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint32'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='8f92235e'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_uint16' mangled-name='fnvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint16'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='149c6638'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_uint8' mangled-name='fnvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint8'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='b96825af'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int64'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='9da381c4'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int32'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='3ff5601b'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_int16' mangled-name='fnvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int16'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='23bd8cb5'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_int8' mangled-name='fnvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int8'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='ee31ee44'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_byte' mangled-name='fnvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_byte'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='d8bf0010'/>
+ </function-decl>
+ <function-decl name='fnvpair_value_boolean_value' mangled-name='fnvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_boolean_value'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint64_array' mangled-name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='5d6479ae'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int64_array' mangled-name='fnvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='cb785ebf'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint32_array' mangled-name='fnvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='90421557'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int32_array' mangled-name='fnvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='4aafb922'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint16_array' mangled-name='fnvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='8a121f49'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int16_array' mangled-name='fnvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='f76f73d0'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint8_array' mangled-name='fnvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='ae3e8ca6'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int8_array' mangled-name='fnvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='256d5229'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_byte_array' mangled-name='fnvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='45b65157'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_boolean_array' mangled-name='fnvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='37e3bd22'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvlist'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_string'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='26a90f95'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='9c313c2d'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint32' mangled-name='fnvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='8f92235e'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint16' mangled-name='fnvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='149c6638'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_uint8' mangled-name='fnvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='b96825af'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int64' mangled-name='fnvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='9da381c4'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int32' mangled-name='fnvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='3ff5601b'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int16' mangled-name='fnvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='23bd8cb5'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_int8' mangled-name='fnvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='ee31ee44'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_byte' mangled-name='fnvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='d8bf0010'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_value'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_boolean' mangled-name='fnvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='fnvlist_lookup_nvpair' mangled-name='fnvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='3fa542f0'/>
+ </function-decl>
+ <function-decl name='fnvlist_remove_nvpair' mangled-name='fnvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='3fa542f0' name='pair'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_nvlist_array' mangled-name='fnvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='857bb57e' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_string_array' mangled-name='fnvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_string_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='f319fae0' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint64_array' mangled-name='fnvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='5d6479ae' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int64_array' mangled-name='fnvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='cb785ebf' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint32_array' mangled-name='fnvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='90421557' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int32_array' mangled-name='fnvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4aafb922' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint16_array' mangled-name='fnvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='8a121f49' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int16_array' mangled-name='fnvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='f76f73d0' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint8_array' mangled-name='fnvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='ae3e8ca6' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int8_array' mangled-name='fnvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='256d5229' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_byte_array' mangled-name='fnvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='45b65157' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_boolean_array' mangled-name='fnvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='37e3bd22' name='val'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_nvpair' mangled-name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='3fa542f0' name='pair'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='5ce45b60' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_string'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='80f4b756' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9c313c2d' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int64' mangled-name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9da381c4' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint32' mangled-name='fnvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='8f92235e' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int32' mangled-name='fnvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='3ff5601b' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint16' mangled-name='fnvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='149c6638' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int16' mangled-name='fnvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='23bd8cb5' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_uint8' mangled-name='fnvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='b96825af' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_int8' mangled-name='fnvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='ee31ee44' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_byte' mangled-name='fnvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='d8bf0010' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_value'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='c19b74c3' name='val'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_num_pairs' mangled-name='fnvlist_num_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_num_pairs'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='b59d7dce'/>
+ </function-decl>
+ <function-decl name='fnvlist_merge' mangled-name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_merge'>
+ <parameter type-id='5ce45b60' name='dst'/>
+ <parameter type-id='5ce45b60' name='src'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_dup' mangled-name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_dup'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
+ <function-decl name='fnvlist_unpack' mangled-name='fnvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_unpack'>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
+ <function-decl name='fnvlist_pack_free' mangled-name='fnvlist_pack_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack_free'>
+ <parameter type-id='26a90f95' name='pack'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_pack' mangled-name='fnvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='78c01427' name='sizep'/>
+ <return type-id='26a90f95'/>
+ </function-decl>
+ <function-decl name='fnvlist_size' mangled-name='fnvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_size'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='b59d7dce'/>
+ </function-decl>
+ <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_free'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_alloc'>
+ <return type-id='5ce45b60'/>
+ </function-decl>
+ <pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ <pointer-type-def type-id='57de658a' size-in-bits='64' id='f319fae0'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <pointer-type-def type-id='23bd8cb5' size-in-bits='64' id='f76f73d0'/>
+ <pointer-type-def type-id='3ff5601b' size-in-bits='64' id='4aafb922'/>
+ <pointer-type-def type-id='9da381c4' size-in-bits='64' id='cb785ebf'/>
+ <pointer-type-def type-id='ee31ee44' size-in-bits='64' id='256d5229'/>
+ <pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
+ <pointer-type-def type-id='5ce45b60' size-in-bits='64' id='857bb57e'/>
+ <pointer-type-def type-id='57928edf' size-in-bits='64' id='3fa542f0'/>
+ <pointer-type-def type-id='b59d7dce' size-in-bits='64' id='78c01427'/>
+ <typedef-decl name='boolean_t' type-id='40ed39d2' id='c19b74c3'/>
+ <typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
+ <typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
+ <typedef-decl name='int64_t' type-id='0c9942d2' id='9da381c4'/>
+ <typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <typedef-decl name='uchar_t' type-id='002ac4a6' id='d8bf0010'/>
+ <typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
+ <typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
+ <typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
+ <pointer-type-def type-id='d8bf0010' size-in-bits='64' id='45b65157'/>
+ <pointer-type-def type-id='149c6638' size-in-bits='64' id='8a121f49'/>
+ <pointer-type-def type-id='8f92235e' size-in-bits='64' id='90421557'/>
+ <pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
+ <pointer-type-def type-id='b96825af' size-in-bits='64' id='ae3e8ca6'/>
+ <pointer-type-def type-id='3502e3ff' size-in-bits='64' id='4dd26a40'/>
+ <type-decl name='void' id='48b5725f'/>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <qualified-type-def type-id='26a90f95' const='yes' id='57de658a'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d2'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
+ <typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
+ <typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
+ <typedef-decl name='__int64_t' type-id='bd54fe1a' id='0c9942d2'/>
+ <typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
+ <typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
+ <typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
+ <typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
+ <typedef-decl name='nvpair_t' type-id='1c34e459' id='57928edf'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='1c34e459'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvp_size' type-id='type-id-3' visibility='default'/>
+ <var-decl name='nvp_size' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvp_name_sz' type-id='type-id-4' visibility='default'/>
+ <var-decl name='nvp_name_sz' type-id='23bd8cb5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='48'>
- <var-decl name='nvp_reserve' type-id='type-id-4' visibility='default'/>
+ <var-decl name='nvp_reserve' type-id='23bd8cb5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvp_value_elem' type-id='type-id-3' visibility='default'/>
+ <var-decl name='nvp_value_elem' type-id='3ff5601b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='nvp_type' type-id='type-id-5' visibility='default'/>
+ <var-decl name='nvp_type' type-id='8d0687d2' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__int32_t' type-id='type-id-1' id='type-id-6'/>
- <typedef-decl name='int32_t' type-id='type-id-6' id='type-id-3'/>
- <type-decl name='short int' size-in-bits='16' id='type-id-7'/>
- <typedef-decl name='__int16_t' type-id='type-id-7' id='type-id-8'/>
- <typedef-decl name='int16_t' type-id='type-id-8' id='type-id-4'/>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-9'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-10'>
- <underlying-type type-id='type-id-9'/>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <type-decl name='short int' size-in-bits='16' id='a2185560'/>
+ <type-decl name='signed char' size-in-bits='8' id='28577a57'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
+ <typedef-decl name='data_type_t' type-id='08f5ca17' id='8d0687d2'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
+ <underlying-type type-id='9cac1fee'/>
<enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
<enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
<enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
<enumerator name='DATA_TYPE_BYTE' value='2'/>
<enumerator name='DATA_TYPE_INT16' value='3'/>
<enumerator name='DATA_TYPE_UINT16' value='4'/>
<enumerator name='DATA_TYPE_INT32' value='5'/>
<enumerator name='DATA_TYPE_UINT32' value='6'/>
<enumerator name='DATA_TYPE_INT64' value='7'/>
<enumerator name='DATA_TYPE_UINT64' value='8'/>
<enumerator name='DATA_TYPE_STRING' value='9'/>
<enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
<enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
<enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
<enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
<enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
<enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
<enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
<enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
<enumerator name='DATA_TYPE_HRTIME' value='18'/>
<enumerator name='DATA_TYPE_NVLIST' value='19'/>
<enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
<enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
<enumerator name='DATA_TYPE_INT8' value='22'/>
<enumerator name='DATA_TYPE_UINT8' value='23'/>
<enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
<enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
<enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
<enumerator name='DATA_TYPE_DOUBLE' value='27'/>
</enum-decl>
- <typedef-decl name='data_type_t' type-id='type-id-10' id='type-id-5'/>
- <typedef-decl name='nvpair_t' type-id='type-id-2' id='type-id-11'/>
- <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-12'/>
- <type-decl name='char' size-in-bits='8' id='type-id-13'/>
- <pointer-type-def type-id='type-id-13' size-in-bits='64' id='type-id-14'/>
- <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-15'/>
- <function-decl name='nvpair_value_match' mangled-name='nvpair_value_match' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-1' name='ai'/>
- <parameter type-id='type-id-14' name='value'/>
- <parameter type-id='type-id-15' name='ep'/>
- <return type-id='type-id-1'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair.c' language='LANG_C99'>
+ <pointer-type-def type-id='37e3bd22' size-in-bits='64' id='03829398'/>
+ <qualified-type-def type-id='26a90f95' const='yes' id='57de658a'/>
+ <pointer-type-def type-id='57de658a' size-in-bits='64' id='f319fae0'/>
+ <pointer-type-def type-id='9b23c9ad' size-in-bits='64' id='c0563f85'/>
+ <pointer-type-def type-id='a0eb0f08' size-in-bits='64' id='7408d286'/>
+ <pointer-type-def type-id='cebdd548' size-in-bits='64' id='e379e62d'/>
+ <pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
+ <pointer-type-def type-id='f76f73d0' size-in-bits='64' id='7e73928e'/>
+ <pointer-type-def type-id='4aafb922' size-in-bits='64' id='9aa04798'/>
+ <pointer-type-def type-id='cb785ebf' size-in-bits='64' id='e37ce48f'/>
+ <pointer-type-def type-id='256d5229' size-in-bits='64' id='ee181ab9'/>
+ <pointer-type-def type-id='857bb57e' size-in-bits='64' id='75be733c'/>
+ <pointer-type-def type-id='3fa542f0' size-in-bits='64' id='0b283d2e'/>
+ <pointer-type-def type-id='b59d7dce' size-in-bits='64' id='78c01427'/>
+ <pointer-type-def type-id='45b65157' size-in-bits='64' id='3b0247c7'/>
+ <pointer-type-def type-id='8a121f49' size-in-bits='64' id='bd8768d9'/>
+ <pointer-type-def type-id='90421557' size-in-bits='64' id='9507d3c7'/>
+ <pointer-type-def type-id='5d6479ae' size-in-bits='64' id='892b4acc'/>
+ <pointer-type-def type-id='ae3e8ca6' size-in-bits='64' id='d8774064'/>
+ <pointer-type-def type-id='3502e3ff' size-in-bits='64' id='4dd26a40'/>
+ <function-decl name='nvlist_xunpack' mangled-name='nvlist_xunpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xunpack'>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <parameter type-id='857bb57e' name='nvlp'/>
+ <parameter type-id='11871392' name='nva'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-16'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='buffer' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='allocated' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='used' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='syntax' type-id='type-id-19' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fastmap' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='translate' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='re_nsub' type-id='type-id-20' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='can_be_null' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='29'>
- <var-decl name='regs_allocated' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='28'>
- <var-decl name='fastmap_accurate' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='27'>
- <var-decl name='no_sub' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='26'>
- <var-decl name='not_bol' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='25'>
- <var-decl name='not_eol' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='24'>
- <var-decl name='newline_anchor' type-id='type-id-21' visibility='default'/>
- </data-member>
- </class-decl>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-22'/>
- <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-17'/>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-18'/>
- <typedef-decl name='reg_syntax_t' type-id='type-id-18' id='type-id-19'/>
- <typedef-decl name='size_t' type-id='type-id-18' id='type-id-20'/>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-21'/>
- <typedef-decl name='regex_t' type-id='type-id-16' id='type-id-23'/>
- <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-24'/>
- <function-decl name='nvpair_value_match_regex' mangled-name='nvpair_value_match_regex' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match_regex'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-1' name='ai'/>
- <parameter type-id='type-id-14' name='value'/>
- <parameter type-id='type-id-24' name='value_regex'/>
- <parameter type-id='type-id-15' name='ep'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='void' id='type-id-25'/>
- <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-26'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvl_version' type-id='type-id-3' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvl_nvflag' type-id='type-id-27' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvl_priv' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvl_flag' type-id='type-id-27' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='nvl_pad' type-id='type-id-3' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='__uint32_t' type-id='type-id-21' id='type-id-29'/>
- <typedef-decl name='uint32_t' type-id='type-id-29' id='type-id-27'/>
- <typedef-decl name='__uint64_t' type-id='type-id-18' id='type-id-30'/>
- <typedef-decl name='uint64_t' type-id='type-id-30' id='type-id-28'/>
- <typedef-decl name='nvlist_t' type-id='type-id-26' id='type-id-31'/>
- <pointer-type-def type-id='type-id-31' size-in-bits='64' id='type-id-32'/>
- <function-decl name='dump_nvlist' mangled-name='dump_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dump_nvlist'>
- <parameter type-id='type-id-32' name='list'/>
- <parameter type-id='type-id-1' name='indent'/>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_unpack'>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <parameter type-id='857bb57e' name='nvlp'/>
+ <parameter type-id='95e97e5e' name='kmflag'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <class-decl name='nvlist_prtctl' size-in-bits='576' is-struct='yes' visibility='default' id='type-id-33'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvprt_fp' type-id='type-id-34' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvprt_indent_mode' type-id='type-id-35' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='nvprt_indent' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvprt_indentinc' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='nvprt_nmfmt' type-id='type-id-36' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='nvprt_eomfmt' type-id='type-id-36' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='nvprt_btwnarrfmt' type-id='type-id-36' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='nvprt_btwnarrfmt_nl' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='nvprt_dfltops' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='nvprt_custops' type-id='type-id-37' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-38'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-39' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-40' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-42' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-43' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-44' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-45' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__pad1' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__pad2' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='__pad3' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='__pad4' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-20' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-1' visibility='default'/>
+ <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_pack'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='9b23c9ad' name='bufp'/>
+ <parameter type-id='78c01427' name='buflen'/>
+ <parameter type-id='95e97e5e' name='encoding'/>
+ <parameter type-id='95e97e5e' name='kmflag'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_merge' mangled-name='nvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_merge'>
+ <parameter type-id='5ce45b60' name='dst'/>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='95e97e5e' name='flag'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvpair' mangled-name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_hrtime' mangled-name='nvpair_value_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_hrtime'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='e379e62d' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_nvlist_array' mangled-name='nvpair_value_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='75be733c' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_string_array' mangled-name='nvpair_value_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='c0563f85' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint64_array' mangled-name='nvpair_value_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='892b4acc' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int64_array' mangled-name='nvpair_value_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='e37ce48f' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint32_array' mangled-name='nvpair_value_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='9507d3c7' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int32_array' mangled-name='nvpair_value_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='9aa04798' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint16_array' mangled-name='nvpair_value_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='bd8768d9' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int16_array' mangled-name='nvpair_value_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='7e73928e' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint8_array' mangled-name='nvpair_value_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='d8774064' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int8_array' mangled-name='nvpair_value_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='ee181ab9' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_byte_array' mangled-name='nvpair_value_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='3b0247c7' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_boolean_array' mangled-name='nvpair_value_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='03829398' name='val'/>
+ <parameter type-id='4dd26a40' name='nelem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='857bb57e' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='9b23c9ad' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_double' mangled-name='nvpair_value_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_double'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='7408d286' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='5d6479ae' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int64' mangled-name='nvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='cb785ebf' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='90421557' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='4aafb922' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint16' mangled-name='nvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='8a121f49' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int16' mangled-name='nvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='f76f73d0' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_uint8' mangled-name='nvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='ae3e8ca6' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_int8' mangled-name='nvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='256d5229' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_byte' mangled-name='nvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='45b65157' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_value_boolean_value' mangled-name='nvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_value'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='37e3bd22' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_exists'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nvpair_embedded_index' mangled-name='nvlist_lookup_nvpair_embedded_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair_embedded_index'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='0b283d2e' name='ret'/>
+ <parameter type-id='7292109c' name='ip'/>
+ <parameter type-id='9b23c9ad' name='ep'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nvpair' mangled-name='nvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='0b283d2e' name='ret'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_pairs' mangled-name='nvlist_lookup_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_pairs'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='95e97e5e' name='flag'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_hrtime' mangled-name='nvlist_lookup_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_hrtime'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='e379e62d' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='75be733c' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_string_array' mangled-name='nvlist_lookup_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='c0563f85' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='892b4acc' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int64_array' mangled-name='nvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='e37ce48f' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint32_array' mangled-name='nvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9507d3c7' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int32_array' mangled-name='nvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9aa04798' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint16_array' mangled-name='nvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='bd8768d9' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int16_array' mangled-name='nvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='7e73928e' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint8_array' mangled-name='nvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='d8774064' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int8_array' mangled-name='nvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='ee181ab9' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_byte_array' mangled-name='nvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='3b0247c7' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_boolean_array' mangled-name='nvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='03829398' name='a'/>
+ <parameter type-id='4dd26a40' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='857bb57e' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9b23c9ad' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_double' mangled-name='nvlist_lookup_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_double'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='7408d286' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='5d6479ae' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='cb785ebf' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint32' mangled-name='nvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='90421557' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int32' mangled-name='nvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4aafb922' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint16' mangled-name='nvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='8a121f49' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int16' mangled-name='nvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='f76f73d0' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_uint8' mangled-name='nvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='ae3e8ca6' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_int8' mangled-name='nvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='256d5229' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_byte' mangled-name='nvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='45b65157' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_boolean_value' mangled-name='nvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_value'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='37e3bd22' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_type_is_array' mangled-name='nvpair_type_is_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type_is_array'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='8d0687d2'/>
+ </function-decl>
+ <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_name'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='26a90f95'/>
+ </function-decl>
+ <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_empty'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='nvlist_prev_nvpair' mangled-name='nvlist_prev_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prev_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='3fa542f0'/>
+ </function-decl>
+ <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_next_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='3fa542f0'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='857bb57e' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='5ce45b60' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_hrtime' mangled-name='nvlist_add_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_hrtime'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='cebdd548' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='f319fae0' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='5d6479ae' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='cb785ebf' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='90421557' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='4aafb922' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='8a121f49' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='f76f73d0' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='ae3e8ca6' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='256d5229' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='45b65157' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_array'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='37e3bd22' name='a'/>
+ <parameter type-id='3502e3ff' name='n'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='80f4b756' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_double' mangled-name='nvlist_add_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_double'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='a0eb0f08' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9c313c2d' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9da381c4' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='8f92235e' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='3ff5601b' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='149c6638' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='23bd8cb5' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='b96825af' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='ee31ee44' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='d8bf0010' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_value'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='c19b74c3' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_dup'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='857bb57e' name='nvlp'/>
+ <parameter type-id='95e97e5e' name='kmflag'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_free'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_alloc'>
+ <parameter type-id='857bb57e' name='nvlp'/>
+ <parameter type-id='3502e3ff' name='nvflag'/>
+ <parameter type-id='95e97e5e' name='kmflag'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_nvflag' mangled-name='nvlist_nvflag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_nvflag'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='3502e3ff'/>
+ </function-decl>
+ <function-decl name='nvlist_lookup_nv_alloc' mangled-name='nvlist_lookup_nv_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nv_alloc'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='11871392'/>
+ </function-decl>
+ <function-decl name='nv_alloc_fini' mangled-name='nv_alloc_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_fini'>
+ <parameter type-id='11871392' name='nva'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='nv_alloc_reset' mangled-name='nv_alloc_reset' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_reset'>
+ <parameter type-id='11871392' name='nva'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='nv_alloc_init' mangled-name='nv_alloc_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_init'>
+ <parameter type-id='11871392' name='nva'/>
+ <parameter type-id='ee1d4944' name='nvo'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_xalloc' mangled-name='nvlist_xalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xalloc'>
+ <parameter type-id='857bb57e' name='nvlp'/>
+ <parameter type-id='3502e3ff' name='nvflag'/>
+ <parameter type-id='11871392' name='nva'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_remove_nvpair' mangled-name='nvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_nvpair'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_all'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='8d0687d2' name='type'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_xdup' mangled-name='nvlist_xdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xdup'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='857bb57e' name='nvlp'/>
+ <parameter type-id='11871392' name='nva'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_size'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='78c01427' name='size'/>
+ <parameter type-id='95e97e5e' name='encoding'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='nvlist_xpack' mangled-name='nvlist_xpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xpack'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='9b23c9ad' name='bufp'/>
+ <parameter type-id='78c01427' name='buflen'/>
+ <parameter type-id='95e97e5e' name='encoding'/>
+ <parameter type-id='11871392' name='nva'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
+ <pointer-type-def type-id='aca16c06' size-in-bits='64' id='ee1d4944'/>
+ <type-decl name='double' size-in-bits='64' id='a0eb0f08'/>
+ <pointer-type-def type-id='cca08635' size-in-bits='64' id='11871392'/>
+ <typedef-decl name='hrtime_t' type-id='1eb56b1e' id='cebdd548'/>
+ <qualified-type-def type-id='03e8ffd6' const='yes' id='aca16c06'/>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
+ <typedef-decl name='nv_alloc_t' type-id='98213087' id='cca08635'/>
+ <class-decl name='nv_alloc' size-in-bits='128' is-struct='yes' visibility='default' id='98213087'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nva_ops' type-id='ee1d4944' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-47' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nva_arg' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-48'>
+ <typedef-decl name='nv_alloc_ops_t' type-id='8f6cc4f4' id='03e8ffd6'/>
+ <class-decl name='nv_alloc_ops' size-in-bits='320' is-struct='yes' visibility='default' id='8f6cc4f4'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_next' type-id='type-id-39' visibility='default'/>
+ <var-decl name='nv_ao_init' type-id='76da8447' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_sbuf' type-id='type-id-40' visibility='default'/>
+ <var-decl name='nv_ao_fini' type-id='fe356f6f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_pos' type-id='type-id-1' visibility='default'/>
+ <var-decl name='nv_ao_alloc' type-id='9ff7f508' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='nv_ao_free' type-id='520da3f4' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='nv_ao_reset' type-id='fe356f6f' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-48' size-in-bits='64' id='type-id-39'/>
- <pointer-type-def type-id='type-id-38' size-in-bits='64' id='type-id-40'/>
- <type-decl name='long int' size-in-bits='64' id='type-id-49'/>
- <typedef-decl name='__off_t' type-id='type-id-49' id='type-id-41'/>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-42'/>
- <type-decl name='signed char' size-in-bits='8' id='type-id-43'/>
-
- <array-type-def dimensions='1' type-id='type-id-13' size-in-bits='8' id='type-id-44'>
- <subrange length='1' type-id='type-id-18' id='type-id-50'/>
-
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <pointer-type-def type-id='e9ff7293' size-in-bits='64' id='76da8447'/>
+ <pointer-type-def type-id='51a21b4b' size-in-bits='64' id='fe356f6f'/>
+ <pointer-type-def type-id='1169c032' size-in-bits='64' id='520da3f4'/>
+ <pointer-type-def type-id='9fff962e' size-in-bits='64' id='9ff7f508'/>
+ <function-type size-in-bits='64' id='51a21b4b'>
+ <parameter type-id='11871392' name='nva'/>
+ <return type-id='48b5725f'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair_alloc_fixed.c' language='LANG_C99'>
+ <var-decl name='nv_fixed_ops' type-id='ee1d4944' mangled-name='nv_fixed_ops' visibility='default' elf-symbol-id='nv_fixed_ops'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libnvpair.c' language='LANG_C99'>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
+ <subrange length='1' type-id='7359adad' id='52f813b4'/>
</array-type-def>
- <typedef-decl name='__off64_t' type-id='type-id-49' id='type-id-45'/>
- <pointer-type-def type-id='type-id-25' size-in-bits='64' id='type-id-46'/>
-
- <array-type-def dimensions='1' type-id='type-id-13' size-in-bits='160' id='type-id-47'>
- <subrange length='20' type-id='type-id-18' id='type-id-51'/>
-
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
+ <subrange length='20' type-id='7359adad' id='fdca39cf'/>
</array-type-def>
- <typedef-decl name='FILE' type-id='type-id-38' id='type-id-52'/>
- <pointer-type-def type-id='type-id-52' size-in-bits='64' id='type-id-34'/>
- <enum-decl name='nvlist_indent_mode' id='type-id-35'>
- <underlying-type type-id='type-id-9'/>
- <enumerator name='NVLIST_INDENT_ABS' value='0'/>
- <enumerator name='NVLIST_INDENT_TABBED' value='1'/>
- </enum-decl>
- <qualified-type-def type-id='type-id-13' const='yes' id='type-id-53'/>
- <pointer-type-def type-id='type-id-53' size-in-bits='64' id='type-id-36'/>
- <class-decl name='nvlist_printops' size-in-bits='3456' is-struct='yes' visibility='default' id='type-id-54'>
+ <type-decl name='double' size-in-bits='64' id='a0eb0f08'/>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
+ <type-decl name='short int' size-in-bits='16' id='a2185560'/>
+ <type-decl name='signed char' size-in-bits='8' id='28577a57'/>
+ <type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
+ <type-decl name='variadic parameter type' id='2c1145c5'/>
+ <type-decl name='void' id='48b5725f'/>
+ <typedef-decl name='nvpair_t' type-id='1c34e459' id='57928edf'/>
+ <class-decl name='nvpair' size-in-bits='128' is-struct='yes' visibility='default' id='1c34e459'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='print_boolean' type-id='type-id-55' visibility='default'/>
+ <var-decl name='nvp_size' type-id='3ff5601b' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='print_boolean_value' type-id='type-id-56' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvp_name_sz' type-id='23bd8cb5' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='print_byte' type-id='type-id-57' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='print_int8' type-id='type-id-58' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='print_uint8' type-id='type-id-59' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='print_int16' type-id='type-id-60' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='48'>
+ <var-decl name='nvp_reserve' type-id='23bd8cb5' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='print_uint16' type-id='type-id-61' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvp_value_elem' type-id='3ff5601b' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='print_int32' type-id='type-id-62' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='nvp_type' type-id='8d0687d2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='print_uint32' type-id='type-id-63' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
+ <typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
+ <typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
+ <typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
+ <typedef-decl name='data_type_t' type-id='08f5ca17' id='8d0687d2'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='DATA_TYPE_DONTCARE' value='-1'/>
+ <enumerator name='DATA_TYPE_UNKNOWN' value='0'/>
+ <enumerator name='DATA_TYPE_BOOLEAN' value='1'/>
+ <enumerator name='DATA_TYPE_BYTE' value='2'/>
+ <enumerator name='DATA_TYPE_INT16' value='3'/>
+ <enumerator name='DATA_TYPE_UINT16' value='4'/>
+ <enumerator name='DATA_TYPE_INT32' value='5'/>
+ <enumerator name='DATA_TYPE_UINT32' value='6'/>
+ <enumerator name='DATA_TYPE_INT64' value='7'/>
+ <enumerator name='DATA_TYPE_UINT64' value='8'/>
+ <enumerator name='DATA_TYPE_STRING' value='9'/>
+ <enumerator name='DATA_TYPE_BYTE_ARRAY' value='10'/>
+ <enumerator name='DATA_TYPE_INT16_ARRAY' value='11'/>
+ <enumerator name='DATA_TYPE_UINT16_ARRAY' value='12'/>
+ <enumerator name='DATA_TYPE_INT32_ARRAY' value='13'/>
+ <enumerator name='DATA_TYPE_UINT32_ARRAY' value='14'/>
+ <enumerator name='DATA_TYPE_INT64_ARRAY' value='15'/>
+ <enumerator name='DATA_TYPE_UINT64_ARRAY' value='16'/>
+ <enumerator name='DATA_TYPE_STRING_ARRAY' value='17'/>
+ <enumerator name='DATA_TYPE_HRTIME' value='18'/>
+ <enumerator name='DATA_TYPE_NVLIST' value='19'/>
+ <enumerator name='DATA_TYPE_NVLIST_ARRAY' value='20'/>
+ <enumerator name='DATA_TYPE_BOOLEAN_VALUE' value='21'/>
+ <enumerator name='DATA_TYPE_INT8' value='22'/>
+ <enumerator name='DATA_TYPE_UINT8' value='23'/>
+ <enumerator name='DATA_TYPE_BOOLEAN_ARRAY' value='24'/>
+ <enumerator name='DATA_TYPE_INT8_ARRAY' value='25'/>
+ <enumerator name='DATA_TYPE_UINT8_ARRAY' value='26'/>
+ <enumerator name='DATA_TYPE_DOUBLE' value='27'/>
+ </enum-decl>
+ <typedef-decl name='regex_t' type-id='19fc9a8c' id='aca3bac8'/>
+ <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='19fc9a8c'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='buffer' type-id='cf536864' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='print_int64' type-id='type-id-64' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='allocated' type-id='7359adad' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='print_uint64' type-id='type-id-65' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='used' type-id='7359adad' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='print_double' type-id='type-id-66' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='syntax' type-id='1b72c3b3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='print_string' type-id='type-id-67' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='fastmap' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1664'>
- <var-decl name='print_hrtime' type-id='type-id-68' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='translate' type-id='cf536864' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1792'>
- <var-decl name='print_nvlist' type-id='type-id-69' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='re_nsub' type-id='b59d7dce' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1920'>
- <var-decl name='print_boolean_array' type-id='type-id-70' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='31'>
+ <var-decl name='can_be_null' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2048'>
- <var-decl name='print_byte_array' type-id='type-id-71' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='29'>
+ <var-decl name='regs_allocated' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='print_int8_array' type-id='type-id-72' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='28'>
+ <var-decl name='fastmap_accurate' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2304'>
- <var-decl name='print_uint8_array' type-id='type-id-73' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='27'>
+ <var-decl name='no_sub' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2432'>
- <var-decl name='print_int16_array' type-id='type-id-74' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='26'>
+ <var-decl name='not_bol' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2560'>
- <var-decl name='print_uint16_array' type-id='type-id-75' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='25'>
+ <var-decl name='not_eol' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2688'>
- <var-decl name='print_int32_array' type-id='type-id-76' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='24'>
+ <var-decl name='newline_anchor' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2816'>
- <var-decl name='print_uint32_array' type-id='type-id-77' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='reg_syntax_t' type-id='7359adad' id='1b72c3b3'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2944'>
- <var-decl name='print_int64_array' type-id='type-id-78' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='3072'>
- <var-decl name='print_uint64_array' type-id='type-id-79' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='3200'>
- <var-decl name='print_string_array' type-id='type-id-80' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='3328'>
- <var-decl name='print_nvlist_array' type-id='type-id-81' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-55'>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
+ <typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
+ <typedef-decl name='nvlist_prtctl_t' type-id='196db161' id='b0c1ff8d'/>
+ <class-decl name='nvlist_prtctl' size-in-bits='576' is-struct='yes' visibility='default' id='d2e8bad9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-82' visibility='default'/>
+ <var-decl name='nvprt_fp' type-id='822cd80b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <var-decl name='nvprt_indent_mode' type-id='628aafab' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-83'/>
- <pointer-type-def type-id='type-id-84' size-in-bits='64' id='type-id-82'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-56'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-85' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='nvprt_indent' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvprt_indentinc' type-id='95e97e5e' visibility='default'/>
</data-member>
- </class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-86'>
- <underlying-type type-id='type-id-9'/>
- <enumerator name='B_FALSE' value='0'/>
- <enumerator name='B_TRUE' value='1'/>
- </enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-86' id='type-id-87'/>
- <pointer-type-def type-id='type-id-88' size-in-bits='64' id='type-id-85'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-57'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-89' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='nvprt_nmfmt' type-id='80f4b756' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='nvprt_eomfmt' type-id='80f4b756' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='uchar_t' type-id='type-id-22' id='type-id-90'/>
- <pointer-type-def type-id='type-id-91' size-in-bits='64' id='type-id-89'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-58'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-92' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='nvprt_btwnarrfmt' type-id='80f4b756' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='nvprt_btwnarrfmt_nl' type-id='95e97e5e' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='__int8_t' type-id='type-id-43' id='type-id-93'/>
- <typedef-decl name='int8_t' type-id='type-id-93' id='type-id-94'/>
- <pointer-type-def type-id='type-id-95' size-in-bits='64' id='type-id-92'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-59'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-96' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='nvprt_dfltops' type-id='7be54adb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='nvprt_custops' type-id='7be54adb' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__uint8_t' type-id='type-id-22' id='type-id-97'/>
- <typedef-decl name='uint8_t' type-id='type-id-97' id='type-id-98'/>
- <pointer-type-def type-id='type-id-99' size-in-bits='64' id='type-id-96'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-60'>
+ <typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-100' visibility='default'/>
+ <var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-101' size-in-bits='64' id='type-id-100'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-61'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-102' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='__uint16_t' type-id='type-id-42' id='type-id-103'/>
- <typedef-decl name='uint16_t' type-id='type-id-103' id='type-id-104'/>
- <pointer-type-def type-id='type-id-105' size-in-bits='64' id='type-id-102'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-62'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-106' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-107' size-in-bits='64' id='type-id-106'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-63'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-108' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-109' size-in-bits='64' id='type-id-108'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-64'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-110' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='__int64_t' type-id='type-id-49' id='type-id-111'/>
- <typedef-decl name='int64_t' type-id='type-id-111' id='type-id-112'/>
- <pointer-type-def type-id='type-id-113' size-in-bits='64' id='type-id-110'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-65'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-114' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-115' size-in-bits='64' id='type-id-114'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-66'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-116' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='_chain' type-id='dca988a5' visibility='default'/>
</data-member>
- </class-decl>
- <type-decl name='double' size-in-bits='64' id='type-id-117'/>
- <pointer-type-def type-id='type-id-118' size-in-bits='64' id='type-id-116'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-67'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-119' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='928'>
+ <var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-120' size-in-bits='64' id='type-id-119'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-68'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-121' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1024'>
+ <var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
</data-member>
- </class-decl>
- <type-decl name='long long int' size-in-bits='64' id='type-id-122'/>
- <typedef-decl name='hrtime_t' type-id='type-id-122' id='type-id-123'/>
- <pointer-type-def type-id='type-id-124' size-in-bits='64' id='type-id-121'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-69'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-125' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1040'>
+ <var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1048'>
+ <var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-126' size-in-bits='64' id='type-id-125'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-70'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-127' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1088'>
+ <var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1152'>
+ <var-decl name='_offset' type-id='724e4de6' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-87' size-in-bits='64' id='type-id-128'/>
- <typedef-decl name='uint_t' type-id='type-id-21' id='type-id-129'/>
- <pointer-type-def type-id='type-id-130' size-in-bits='64' id='type-id-127'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-71'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-131' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1216'>
+ <var-decl name='__pad1' type-id='eaa32e2f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1280'>
+ <var-decl name='__pad2' type-id='eaa32e2f' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-90' size-in-bits='64' id='type-id-132'/>
- <pointer-type-def type-id='type-id-133' size-in-bits='64' id='type-id-131'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-72'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-134' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1344'>
+ <var-decl name='__pad3' type-id='eaa32e2f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1408'>
+ <var-decl name='__pad4' type-id='eaa32e2f' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-94' size-in-bits='64' id='type-id-135'/>
- <pointer-type-def type-id='type-id-136' size-in-bits='64' id='type-id-134'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-73'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-137' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1472'>
+ <var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1536'>
+ <var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1568'>
+ <var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-98' size-in-bits='64' id='type-id-138'/>
- <pointer-type-def type-id='type-id-139' size-in-bits='64' id='type-id-137'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-74'>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='010ae0b9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-140' visibility='default'/>
+ <var-decl name='_next' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <var-decl name='_sbuf' type-id='dca988a5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_pos' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-4' size-in-bits='64' id='type-id-141'/>
- <pointer-type-def type-id='type-id-142' size-in-bits='64' id='type-id-140'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-75'>
+ <typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
+ <typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
+ <typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
+ <enum-decl name='nvlist_indent_mode' id='628aafab'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='NVLIST_INDENT_ABS' value='0'/>
+ <enumerator name='NVLIST_INDENT_TABBED' value='1'/>
+ </enum-decl>
+ <class-decl name='nvlist_printops' size-in-bits='3456' is-struct='yes' visibility='default' id='ebc6735b'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-143' visibility='default'/>
+ <var-decl name='print_boolean' type-id='47d8e2d1' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='print_boolean_value' type-id='8a6f2dcc' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-104' size-in-bits='64' id='type-id-144'/>
- <pointer-type-def type-id='type-id-145' size-in-bits='64' id='type-id-143'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-76'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-146' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='print_byte' type-id='bdf563df' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='print_int8' type-id='5636b8e3' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-147'/>
- <pointer-type-def type-id='type-id-148' size-in-bits='64' id='type-id-146'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-77'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-149' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='print_uint8' type-id='0119a618' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='print_int16' type-id='4657e0ba' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-27' size-in-bits='64' id='type-id-150'/>
- <pointer-type-def type-id='type-id-151' size-in-bits='64' id='type-id-149'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-78'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-152' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='print_uint16' type-id='ecfe67d7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='print_int32' type-id='8947fe4c' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-112' size-in-bits='64' id='type-id-153'/>
- <pointer-type-def type-id='type-id-154' size-in-bits='64' id='type-id-152'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-79'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-155' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1024'>
+ <var-decl name='print_uint32' type-id='365a6549' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1152'>
+ <var-decl name='print_int64' type-id='d6ce379b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1280'>
+ <var-decl name='print_uint64' type-id='bb34572a' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1408'>
+ <var-decl name='print_double' type-id='ef32d857' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1536'>
+ <var-decl name='print_string' type-id='f6ce752a' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1664'>
+ <var-decl name='print_hrtime' type-id='c61b59cf' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1792'>
+ <var-decl name='print_nvlist' type-id='1178977f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1920'>
+ <var-decl name='print_boolean_array' type-id='15d12763' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2048'>
+ <var-decl name='print_byte_array' type-id='4207d3e6' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2176'>
+ <var-decl name='print_int8_array' type-id='e4cdea78' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2304'>
+ <var-decl name='print_uint8_array' type-id='252509cf' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2432'>
+ <var-decl name='print_int16_array' type-id='3cf98639' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2560'>
+ <var-decl name='print_uint16_array' type-id='060bdb18' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2688'>
+ <var-decl name='print_int32_array' type-id='bbaa8a1b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2816'>
+ <var-decl name='print_uint32_array' type-id='745b46ee' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2944'>
+ <var-decl name='print_int64_array' type-id='223df2d6' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='3072'>
+ <var-decl name='print_uint64_array' type-id='f564486f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='3200'>
+ <var-decl name='print_string_array' type-id='f15f91ac' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='3328'>
+ <var-decl name='print_nvlist_array' type-id='f885c1bf' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-28' size-in-bits='64' id='type-id-156'/>
- <pointer-type-def type-id='type-id-157' size-in-bits='64' id='type-id-155'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-80'>
+ <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='47d8e2d1'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-158' visibility='default'/>
+ <var-decl name='op' type-id='6d994334' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-159' size-in-bits='64' id='type-id-158'/>
- <class-decl name='__anonymous_struct__' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-81'>
+ <class-decl name='__anonymous_struct__1' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='8a6f2dcc'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='op' type-id='type-id-160' visibility='default'/>
+ <var-decl name='op' type-id='6a2f50c1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='arg' type-id='type-id-46' visibility='default'/>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-32' size-in-bits='64' id='type-id-161'/>
- <pointer-type-def type-id='type-id-162' size-in-bits='64' id='type-id-160'/>
- <pointer-type-def type-id='type-id-54' size-in-bits='64' id='type-id-37'/>
- <typedef-decl name='nvlist_prtctl_t' type-id='type-id-83' id='type-id-163'/>
- <function-decl name='nvlist_prt' mangled-name='nvlist_prt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prt'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-163' name='pctl'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print'>
- <parameter type-id='type-id-34' name='fp'/>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_free' mangled-name='nvlist_prtctl_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_free'>
- <parameter type-id='type-id-163' name='pctl'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_alloc' mangled-name='nvlist_prtctl_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_alloc'>
- <return type-id='type-id-163'/>
- </function-decl>
- <pointer-type-def type-id='type-id-164' size-in-bits='64' id='type-id-165'/>
- <function-decl name='nvlist_prtctlop_nvlist_array' mangled-name='nvlist_prtctlop_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-165' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-166' size-in-bits='64' id='type-id-167'/>
- <function-decl name='nvlist_prtctlop_string_array' mangled-name='nvlist_prtctlop_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-167' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-168' size-in-bits='64' id='type-id-169'/>
- <function-decl name='nvlist_prtctlop_uint64_array' mangled-name='nvlist_prtctlop_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-169' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-170' size-in-bits='64' id='type-id-171'/>
- <function-decl name='nvlist_prtctlop_int64_array' mangled-name='nvlist_prtctlop_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-171' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-172' size-in-bits='64' id='type-id-173'/>
- <function-decl name='nvlist_prtctlop_uint32_array' mangled-name='nvlist_prtctlop_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-173' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-174' size-in-bits='64' id='type-id-175'/>
- <function-decl name='nvlist_prtctlop_int32_array' mangled-name='nvlist_prtctlop_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-175' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-176' size-in-bits='64' id='type-id-177'/>
- <function-decl name='nvlist_prtctlop_uint16_array' mangled-name='nvlist_prtctlop_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-177' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-178' size-in-bits='64' id='type-id-179'/>
- <function-decl name='nvlist_prtctlop_int16_array' mangled-name='nvlist_prtctlop_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-179' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-180' size-in-bits='64' id='type-id-181'/>
- <function-decl name='nvlist_prtctlop_uint8_array' mangled-name='nvlist_prtctlop_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-181' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-182' size-in-bits='64' id='type-id-183'/>
- <function-decl name='nvlist_prtctlop_int8_array' mangled-name='nvlist_prtctlop_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-183' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-184' size-in-bits='64' id='type-id-185'/>
- <function-decl name='nvlist_prtctlop_byte_array' mangled-name='nvlist_prtctlop_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-185' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-186' size-in-bits='64' id='type-id-187'/>
- <function-decl name='nvlist_prtctlop_boolean_array' mangled-name='nvlist_prtctlop_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_array'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-187' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-188' size-in-bits='64' id='type-id-189'/>
- <function-decl name='nvlist_prtctlop_nvlist' mangled-name='nvlist_prtctlop_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-189' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-190' size-in-bits='64' id='type-id-191'/>
- <function-decl name='nvlist_prtctlop_hrtime' mangled-name='nvlist_prtctlop_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_hrtime'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-191' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-192' size-in-bits='64' id='type-id-193'/>
- <function-decl name='nvlist_prtctlop_string' mangled-name='nvlist_prtctlop_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-193' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-194' size-in-bits='64' id='type-id-195'/>
- <function-decl name='nvlist_prtctlop_double' mangled-name='nvlist_prtctlop_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_double'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-195' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-196' size-in-bits='64' id='type-id-197'/>
- <function-decl name='nvlist_prtctlop_uint64' mangled-name='nvlist_prtctlop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-197' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-198' size-in-bits='64' id='type-id-199'/>
- <function-decl name='nvlist_prtctlop_int64' mangled-name='nvlist_prtctlop_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-199' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-200' size-in-bits='64' id='type-id-201'/>
- <function-decl name='nvlist_prtctlop_uint32' mangled-name='nvlist_prtctlop_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-201' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-202' size-in-bits='64' id='type-id-203'/>
- <function-decl name='nvlist_prtctlop_int32' mangled-name='nvlist_prtctlop_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-203' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-204' size-in-bits='64' id='type-id-205'/>
- <function-decl name='nvlist_prtctlop_uint16' mangled-name='nvlist_prtctlop_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-205' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-206' size-in-bits='64' id='type-id-207'/>
- <function-decl name='nvlist_prtctlop_int16' mangled-name='nvlist_prtctlop_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-207' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-208' size-in-bits='64' id='type-id-209'/>
- <function-decl name='nvlist_prtctlop_uint8' mangled-name='nvlist_prtctlop_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-209' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-210' size-in-bits='64' id='type-id-211'/>
- <function-decl name='nvlist_prtctlop_int8' mangled-name='nvlist_prtctlop_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-211' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-212' size-in-bits='64' id='type-id-213'/>
- <function-decl name='nvlist_prtctlop_byte' mangled-name='nvlist_prtctlop_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-213' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-214' size-in-bits='64' id='type-id-215'/>
- <function-decl name='nvlist_prtctlop_boolean_value' mangled-name='nvlist_prtctlop_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_value'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-215' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <pointer-type-def type-id='type-id-216' size-in-bits='64' id='type-id-217'/>
- <function-decl name='nvlist_prtctlop_boolean' mangled-name='nvlist_prtctlop_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-217' name='func'/>
- <parameter type-id='type-id-46' name='private'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <enum-decl name='nvlist_prtctl_fmt' id='type-id-218'>
- <underlying-type type-id='type-id-9'/>
- <enumerator name='NVLIST_FMT_MEMBER_NAME' value='0'/>
- <enumerator name='NVLIST_FMT_MEMBER_POSTAMBLE' value='1'/>
- <enumerator name='NVLIST_FMT_BTWN_ARRAY' value='2'/>
+ <typedef-decl name='boolean_t' type-id='40ed39d2' id='c19b74c3'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d2'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
</enum-decl>
- <function-decl name='nvlist_prtctl_dofmt' mangled-name='nvlist_prtctl_dofmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_dofmt'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-218' name='which'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_setfmt' mangled-name='nvlist_prtctl_setfmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setfmt'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-218' name='which'/>
- <parameter type-id='type-id-36' name='fmt'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_doindent' mangled-name='nvlist_prtctl_doindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_doindent'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-1' name='onemore'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_setindent' mangled-name='nvlist_prtctl_setindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setindent'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-35' name='mode'/>
- <parameter type-id='type-id-1' name='start'/>
- <parameter type-id='type-id-1' name='inc'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_getdest' mangled-name='nvlist_prtctl_getdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_getdest'>
- <parameter type-id='type-id-163' name='pctl'/>
- <return type-id='type-id-34'/>
- </function-decl>
- <function-decl name='nvlist_prtctl_setdest' mangled-name='nvlist_prtctl_setdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setdest'>
- <parameter type-id='type-id-163' name='pctl'/>
- <parameter type-id='type-id-34' name='fp'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_string_array' mangled-name='nvpair_value_string_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='regexec' mangled-name='regexec' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='sscanf' mangled-name='sscanf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_byte' mangled-name='nvpair_value_byte' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_byte_array' mangled-name='nvpair_value_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int8' mangled-name='nvpair_value_int8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int8_array' mangled-name='nvpair_value_int8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint8' mangled-name='nvpair_value_uint8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint8_array' mangled-name='nvpair_value_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int16' mangled-name='nvpair_value_int16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int16_array' mangled-name='nvpair_value_int16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint16' mangled-name='nvpair_value_uint16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint16_array' mangled-name='nvpair_value_uint16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int32_array' mangled-name='nvpair_value_int32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint32_array' mangled-name='nvpair_value_uint32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int64' mangled-name='nvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_int64_array' mangled-name='nvpair_value_int64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_uint64_array' mangled-name='nvpair_value_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_boolean_value' mangled-name='nvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_boolean_array' mangled-name='nvpair_value_boolean_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_type_is_array' mangled-name='nvpair_type_is_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='strspn' mangled-name='strspn' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='strcspn' mangled-name='strcspn' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_nvlist_array' mangled-name='nvpair_value_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_double' mangled-name='nvpair_value_double' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvpair_value_hrtime' mangled-name='nvpair_value_hrtime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__builtin_fputs' mangled-name='fputs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='malloc' mangled-name='malloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__builtin_strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__builtin_fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-130'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-128'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-120'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-14'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-159'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-15'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-118'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-117'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-84'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-142'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-141'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-148'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-147'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-154'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-136'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-135'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-126'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-32'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-162'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-161'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-88'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-87'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-124'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-101'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-4'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-107'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-113'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-112'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-95'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-94'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-91'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-90'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-105'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-109'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-27'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-115'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-28'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-99'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-98'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-133'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-132'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-145'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-144'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-151'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-150'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-157'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-156'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-139'>
- <parameter type-id='type-id-83'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-138'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-186'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-128'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-192'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-14'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-166'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-15'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-194'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-117'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-216'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-1'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-178'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-141'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-174'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-147'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-170'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-153'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-182'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-135'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-188'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-32'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-164'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-161'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-214'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-87'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-190'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-123'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-206'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-4'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-202'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-3'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-198'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-112'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-210'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-94'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-212'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-90'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-204'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-104'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-200'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-27'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-196'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-28'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-208'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-98'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-184'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-132'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-176'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-144'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-172'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-150'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-168'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-156'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-180'>
- <parameter type-id='type-id-163'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-32'/>
- <parameter type-id='type-id-36'/>
- <parameter type-id='type-id-138'/>
- <parameter type-id='type-id-129'/>
- <return type-id='type-id-1'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libnvpair_json.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
- <function-decl name='nvlist_print_json' mangled-name='nvlist_print_json' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print_json'>
- <parameter type-id='type-id-34' name='fp'/>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvpair_value_string' mangled-name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_byte' mangled-name='fnvpair_value_byte' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_int16' mangled-name='fnvpair_value_int16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint16' mangled-name='fnvpair_value_uint16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint32' mangled-name='fnvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_boolean_value' mangled-name='fnvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_int8' mangled-name='fnvpair_value_int8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint8' mangled-name='fnvpair_value_uint8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__ctype_get_mb_cur_max' mangled-name='__ctype_get_mb_cur_max' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='mbrtowc' mangled-name='mbrtowc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='nvpair_alloc_system.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
- <class-decl name='nv_alloc' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-219'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nva_ops' type-id='type-id-220' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nva_arg' type-id='type-id-46' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='nv_alloc_ops' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-221'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nv_ao_init' type-id='type-id-222' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nv_ao_fini' type-id='type-id-223' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nv_ao_alloc' type-id='type-id-224' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='nv_ao_free' type-id='type-id-225' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='nv_ao_reset' type-id='type-id-223' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='nv_alloc_t' type-id='type-id-219' id='type-id-226'/>
- <pointer-type-def type-id='type-id-226' size-in-bits='64' id='type-id-227'/>
- <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-228'>
+ <class-decl name='__anonymous_struct__2' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='bdf563df'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='gp_offset' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='fp_offset' type-id='type-id-21' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='overflow_arg_area' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='reg_save_area' type-id='type-id-46' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-228' size-in-bits='64' id='type-id-229'/>
- <pointer-type-def type-id='type-id-230' size-in-bits='64' id='type-id-222'/>
- <pointer-type-def type-id='type-id-231' size-in-bits='64' id='type-id-223'/>
- <pointer-type-def type-id='type-id-232' size-in-bits='64' id='type-id-224'/>
- <pointer-type-def type-id='type-id-233' size-in-bits='64' id='type-id-225'/>
- <typedef-decl name='nv_alloc_ops_t' type-id='type-id-221' id='type-id-234'/>
- <qualified-type-def type-id='type-id-234' const='yes' id='type-id-235'/>
- <pointer-type-def type-id='type-id-235' size-in-bits='64' id='type-id-220'/>
- <var-decl name='nv_alloc_nosleep' type-id='type-id-227' mangled-name='nv_alloc_nosleep' visibility='default' elf-symbol-id='nv_alloc_nosleep'/>
- <function-type size-in-bits='64' id='type-id-230'>
- <parameter type-id='type-id-227'/>
- <parameter type-id='type-id-229'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-231'>
- <parameter type-id='type-id-227'/>
- <return type-id='type-id-25'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-233'>
- <parameter type-id='type-id-227'/>
- <parameter type-id='type-id-46'/>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-25'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-232'>
- <parameter type-id='type-id-227'/>
- <parameter type-id='type-id-20'/>
- <return type-id='type-id-46'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair_alloc_fixed.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
- <var-decl name='nv_fixed_ops' type-id='type-id-220' mangled-name='nv_fixed_ops' visibility='default' elf-symbol-id='nv_fixed_ops'/>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/nvpair/nvpair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
- <function-decl name='nvlist_xunpack' mangled-name='nvlist_xunpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xunpack'>
- <parameter type-id='type-id-14' name='buf'/>
- <parameter type-id='type-id-20' name='buflen'/>
- <parameter type-id='type-id-161' name='nvlp'/>
- <parameter type-id='type-id-227' name='nva'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_unpack'>
- <parameter type-id='type-id-14' name='buf'/>
- <parameter type-id='type-id-20' name='buflen'/>
- <parameter type-id='type-id-161' name='nvlp'/>
- <parameter type-id='type-id-1' name='kmflag'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-236'/>
- <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_pack'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-15' name='bufp'/>
- <parameter type-id='type-id-236' name='buflen'/>
- <parameter type-id='type-id-1' name='encoding'/>
- <parameter type-id='type-id-1' name='kmflag'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_merge' mangled-name='nvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_merge'>
- <parameter type-id='type-id-32' name='dst'/>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-1' name='flag'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_nvpair' mangled-name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-123' size-in-bits='64' id='type-id-237'/>
- <function-decl name='nvpair_value_hrtime' mangled-name='nvpair_value_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_hrtime'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-237' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-161' size-in-bits='64' id='type-id-238'/>
- <pointer-type-def type-id='type-id-129' size-in-bits='64' id='type-id-239'/>
- <function-decl name='nvpair_value_nvlist_array' mangled-name='nvpair_value_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-238' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-15' size-in-bits='64' id='type-id-240'/>
- <function-decl name='nvpair_value_string_array' mangled-name='nvpair_value_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-240' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-156' size-in-bits='64' id='type-id-241'/>
- <function-decl name='nvpair_value_uint64_array' mangled-name='nvpair_value_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-241' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-153' size-in-bits='64' id='type-id-242'/>
- <function-decl name='nvpair_value_int64_array' mangled-name='nvpair_value_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-242' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-150' size-in-bits='64' id='type-id-243'/>
- <function-decl name='nvpair_value_uint32_array' mangled-name='nvpair_value_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-243' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-244'/>
- <function-decl name='nvpair_value_int32_array' mangled-name='nvpair_value_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-244' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-144' size-in-bits='64' id='type-id-245'/>
- <function-decl name='nvpair_value_uint16_array' mangled-name='nvpair_value_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-245' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-141' size-in-bits='64' id='type-id-246'/>
- <function-decl name='nvpair_value_int16_array' mangled-name='nvpair_value_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-246' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-138' size-in-bits='64' id='type-id-247'/>
- <function-decl name='nvpair_value_uint8_array' mangled-name='nvpair_value_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-247' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-248'/>
- <function-decl name='nvpair_value_int8_array' mangled-name='nvpair_value_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-248' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-132' size-in-bits='64' id='type-id-249'/>
- <function-decl name='nvpair_value_byte_array' mangled-name='nvpair_value_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-249' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-128' size-in-bits='64' id='type-id-250'/>
- <function-decl name='nvpair_value_boolean_array' mangled-name='nvpair_value_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-250' name='val'/>
- <parameter type-id='type-id-239' name='nelem'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_nvlist'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-161' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_string'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-15' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-117' size-in-bits='64' id='type-id-251'/>
- <function-decl name='nvpair_value_double' mangled-name='nvpair_value_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_double'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-251' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint64'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-156' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_int64' mangled-name='nvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int64'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-153' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint32'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-150' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int32'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-147' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_uint16' mangled-name='nvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint16'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-144' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_int16' mangled-name='nvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int16'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-141' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_uint8' mangled-name='nvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_uint8'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-138' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_int8' mangled-name='nvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_int8'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-135' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_byte' mangled-name='nvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_byte'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-132' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_boolean_value' mangled-name='nvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_boolean_value'>
- <parameter type-id='type-id-12' name='nvp'/>
- <parameter type-id='type-id-128' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_exists'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <pointer-type-def type-id='type-id-12' size-in-bits='64' id='type-id-252'/>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-253'/>
- <function-decl name='nvlist_lookup_nvpair_embedded_index' mangled-name='nvlist_lookup_nvpair_embedded_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair_embedded_index'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-252' name='ret'/>
- <parameter type-id='type-id-253' name='ip'/>
- <parameter type-id='type-id-15' name='ep'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvpair' mangled-name='nvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-252' name='ret'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_pairs' mangled-name='nvlist_lookup_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_pairs'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-1' name='flag'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_hrtime' mangled-name='nvlist_lookup_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_hrtime'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-237' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-238' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_string_array' mangled-name='nvlist_lookup_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-240' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-241' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int64_array' mangled-name='nvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-242' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint32_array' mangled-name='nvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-243' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int32_array' mangled-name='nvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-244' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint16_array' mangled-name='nvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-245' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int16_array' mangled-name='nvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-246' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint8_array' mangled-name='nvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-247' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int8_array' mangled-name='nvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-248' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_byte_array' mangled-name='nvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-249' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_boolean_array' mangled-name='nvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-250' name='a'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nvlist'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-161' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_string'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-15' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_double' mangled-name='nvlist_lookup_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_double'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-251' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-156' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-153' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint32' mangled-name='nvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-150' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int32' mangled-name='nvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-147' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint16' mangled-name='nvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-144' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int16' mangled-name='nvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-141' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint8' mangled-name='nvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_uint8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-138' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int8' mangled-name='nvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_int8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-135' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_byte' mangled-name='nvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_byte'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-132' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_boolean_value' mangled-name='nvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean_value'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-128' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_boolean'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_type_is_array' mangled-name='nvpair_type_is_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type_is_array'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_type'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_name'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_empty'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <function-decl name='nvlist_prev_nvpair' mangled-name='nvlist_prev_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prev_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-12'/>
- </function-decl>
- <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_next_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-12'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-161' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_nvlist'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-32' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_hrtime' mangled-name='nvlist_add_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_hrtime'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-123' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-14' const='yes' id='type-id-254'/>
- <pointer-type-def type-id='type-id-254' size-in-bits='64' id='type-id-255'/>
- <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-255' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-156' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-153' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-150' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-147' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-144' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-141' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-138' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-135' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-132' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-128' name='a'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_string'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-36' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_double' mangled-name='nvlist_add_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_double'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-117' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-28' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-112' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-27' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-3' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-104' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-4' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_uint8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-98' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_int8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-94' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_byte'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-90' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean_value'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-87' name='val'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_add_boolean'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_dup'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-161' name='nvlp'/>
- <parameter type-id='type-id-1' name='kmflag'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_free'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_alloc'>
- <parameter type-id='type-id-161' name='nvlp'/>
- <parameter type-id='type-id-129' name='nvflag'/>
- <parameter type-id='type-id-1' name='kmflag'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_nvflag' mangled-name='nvlist_nvflag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_nvflag'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-129'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nv_alloc' mangled-name='nvlist_lookup_nv_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_lookup_nv_alloc'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-227'/>
- </function-decl>
- <function-decl name='nv_alloc_fini' mangled-name='nv_alloc_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_fini'>
- <parameter type-id='type-id-227' name='nva'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nv_alloc_reset' mangled-name='nv_alloc_reset' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_reset'>
- <parameter type-id='type-id-227' name='nva'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nv_alloc_init' mangled-name='nv_alloc_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nv_alloc_init'>
- <parameter type-id='type-id-227' name='nva'/>
- <parameter type-id='type-id-220' name='nvo'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_xalloc' mangled-name='nvlist_xalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xalloc'>
- <parameter type-id='type-id-161' name='nvlp'/>
- <parameter type-id='type-id-129' name='nvflag'/>
- <parameter type-id='type-id-227' name='nva'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_remove_nvpair' mangled-name='nvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove_all'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_remove'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-5' name='type'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_xdup' mangled-name='nvlist_xdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xdup'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-161' name='nvlp'/>
- <parameter type-id='type-id-227' name='nva'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_size'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-236' name='size'/>
- <parameter type-id='type-id-1' name='encoding'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_xpack' mangled-name='nvlist_xpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_xpack'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-15' name='bufp'/>
- <parameter type-id='type-id-236' name='buflen'/>
- <parameter type-id='type-id-1' name='encoding'/>
- <parameter type-id='type-id-227' name='nva'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='xdr_int' mangled-name='xdr_int' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__builtin_memset' mangled-name='memset' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_string' mangled-name='xdr_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_longlong_t' mangled-name='xdr_longlong_t' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_array' mangled-name='xdr_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_opaque' mangled-name='xdr_opaque' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_u_longlong_t' mangled-name='xdr_u_longlong_t' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_double' mangled-name='xdr_double' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_u_int' mangled-name='xdr_u_int' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_u_short' mangled-name='xdr_u_short' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_short' mangled-name='xdr_short' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdr_char' mangled-name='xdr_char' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__builtin_memmove' mangled-name='memmove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='xdrmem_create' mangled-name='xdrmem_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='strtol' mangled-name='strtol' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/nvpair/fnvpair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libnvpair' language='LANG_C99'>
- <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_nvlist'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-32'/>
- </function-decl>
- <function-decl name='fnvpair_value_string' mangled-name='fnvpair_value_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_string'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint64'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-28'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint32' mangled-name='fnvpair_value_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint32'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-27'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint16' mangled-name='fnvpair_value_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint16'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-104'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint8' mangled-name='fnvpair_value_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_uint8'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-98'/>
- </function-decl>
- <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int64'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-112'/>
- </function-decl>
- <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int32'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-3'/>
- </function-decl>
- <function-decl name='fnvpair_value_int16' mangled-name='fnvpair_value_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int16'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-4'/>
- </function-decl>
- <function-decl name='fnvpair_value_int8' mangled-name='fnvpair_value_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_int8'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-94'/>
- </function-decl>
- <function-decl name='fnvpair_value_byte' mangled-name='fnvpair_value_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_byte'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-90'/>
- </function-decl>
- <function-decl name='fnvpair_value_boolean_value' mangled-name='fnvpair_value_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvpair_value_boolean_value'>
- <parameter type-id='type-id-12' name='nvp'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint64_array' mangled-name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-156'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int64_array' mangled-name='fnvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-153'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint32_array' mangled-name='fnvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-150'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int32_array' mangled-name='fnvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-147'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint16_array' mangled-name='fnvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-144'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int16_array' mangled-name='fnvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-141'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint8_array' mangled-name='fnvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-138'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int8_array' mangled-name='fnvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-135'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_byte_array' mangled-name='fnvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-132'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_boolean_array' mangled-name='fnvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-239' name='n'/>
- <return type-id='type-id-128'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvlist'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-32'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_string'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-28'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint32' mangled-name='fnvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-27'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint16' mangled-name='fnvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-104'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint8' mangled-name='fnvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_uint8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-98'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int64' mangled-name='fnvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-112'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int32' mangled-name='fnvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-3'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int16' mangled-name='fnvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-4'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_int8' mangled-name='fnvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_int8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-94'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_byte' mangled-name='fnvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_byte'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-90'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean_value'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_boolean' mangled-name='fnvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_boolean'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_nvpair' mangled-name='fnvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_lookup_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-12'/>
- </function-decl>
- <function-decl name='fnvlist_remove_nvpair' mangled-name='fnvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-12' name='pair'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_remove'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_nvlist_array' mangled-name='fnvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-161' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_string_array' mangled-name='fnvlist_add_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_string_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-255' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint64_array' mangled-name='fnvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-156' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int64_array' mangled-name='fnvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-153' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint32_array' mangled-name='fnvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-150' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int32_array' mangled-name='fnvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-147' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint16_array' mangled-name='fnvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-144' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int16_array' mangled-name='fnvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-141' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint8_array' mangled-name='fnvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-138' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int8_array' mangled-name='fnvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-135' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_byte_array' mangled-name='fnvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-132' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_boolean_array' mangled-name='fnvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_array'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-128' name='val'/>
- <parameter type-id='type-id-129' name='n'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_nvpair' mangled-name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvpair'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-12' name='pair'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_nvlist'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-32' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_string'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-36' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-28' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int64' mangled-name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int64'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-112' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint32' mangled-name='fnvlist_add_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-27' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int32' mangled-name='fnvlist_add_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int32'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-3' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint16' mangled-name='fnvlist_add_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-104' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int16' mangled-name='fnvlist_add_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int16'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-4' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint8' mangled-name='fnvlist_add_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_uint8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-98' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_int8' mangled-name='fnvlist_add_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_int8'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-94' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_byte' mangled-name='fnvlist_add_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_byte'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-90' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean_value'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <parameter type-id='type-id-87' name='val'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_add_boolean'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-36' name='name'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_num_pairs' mangled-name='fnvlist_num_pairs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_num_pairs'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='fnvlist_merge' mangled-name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_merge'>
- <parameter type-id='type-id-32' name='dst'/>
- <parameter type-id='type-id-32' name='src'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_dup' mangled-name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_dup'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-32'/>
- </function-decl>
- <function-decl name='fnvlist_unpack' mangled-name='fnvlist_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_unpack'>
- <parameter type-id='type-id-14' name='buf'/>
- <parameter type-id='type-id-20' name='buflen'/>
- <return type-id='type-id-32'/>
- </function-decl>
- <function-decl name='fnvlist_pack_free' mangled-name='fnvlist_pack_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack_free'>
- <parameter type-id='type-id-14' name='pack'/>
- <parameter type-id='type-id-20' name='size'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_pack' mangled-name='fnvlist_pack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_pack'>
- <parameter type-id='type-id-32' name='nvl'/>
- <parameter type-id='type-id-236' name='sizep'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='fnvlist_size' mangled-name='fnvlist_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_size'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-20'/>
- </function-decl>
- <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_free'>
- <parameter type-id='type-id-32' name='nvl'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fnvlist_alloc'>
- <return type-id='type-id-32'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int64_array' mangled-name='nvlist_lookup_int64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint32_array' mangled-name='nvlist_lookup_uint32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int32_array' mangled-name='nvlist_lookup_int32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint16_array' mangled-name='nvlist_lookup_uint16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int16_array' mangled-name='nvlist_lookup_int16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint8_array' mangled-name='nvlist_lookup_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int8_array' mangled-name='nvlist_lookup_int8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_byte_array' mangled-name='nvlist_lookup_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_boolean_array' mangled-name='nvlist_lookup_boolean_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint32' mangled-name='nvlist_lookup_uint32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint16' mangled-name='nvlist_lookup_uint16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint8' mangled-name='nvlist_lookup_uint8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int32' mangled-name='nvlist_lookup_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int16' mangled-name='nvlist_lookup_int16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int8' mangled-name='nvlist_lookup_int8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <var-decl name='op' type-id='8a1fb33a' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uchar_t' type-id='002ac4a6' id='d8bf0010'/>
+ <class-decl name='__anonymous_struct__3' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='5636b8e3'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='506696a8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
+ <typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
+ <class-decl name='__anonymous_struct__4' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='0119a618'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='39b623f9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
+ <typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
+ <class-decl name='__anonymous_struct__5' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='4657e0ba'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='ea6be4eb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__6' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='ecfe67d7'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='f10f1e84' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
+ <typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
+ <class-decl name='__anonymous_struct__7' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='8947fe4c'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='1708018d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__8' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='365a6549'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='90174072' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__9' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='d6ce379b'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='d2af7f32' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='int64_t' type-id='0c9942d2' id='9da381c4'/>
+ <typedef-decl name='__int64_t' type-id='bd54fe1a' id='0c9942d2'/>
+ <class-decl name='__anonymous_struct__10' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='bb34572a'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='0b22f759' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__11' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='ef32d857'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='3be4d568' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__12' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='f6ce752a'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='c0d0f877' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__13' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='c61b59cf'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='e1c54c3c' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='hrtime_t' type-id='1eb56b1e' id='cebdd548'/>
+ <class-decl name='__anonymous_struct__14' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='1178977f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='19ea27ae' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__15' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='15d12763'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='7ef0e988' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
+ <class-decl name='__anonymous_struct__16' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='4207d3e6'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='7391ed39' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__17' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='e4cdea78'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='42257af5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__18' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='252509cf'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='330cc0d0' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__19' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='3cf98639'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='506ab59a' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__20' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='060bdb18'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='ed6a3a3d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__21' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='bbaa8a1b'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='750cc41c' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__22' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='745b46ee'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='292cdbcf' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__23' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='223df2d6'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='aaea91b5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__24' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='f564486f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='7e85a9b6' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__25' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='f15f91ac'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='de20bf07' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='__anonymous_struct__26' size-in-bits='128' is-struct='yes' is-anonymous='yes' visibility='default' id='f885c1bf'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='op' type-id='2835af80' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <enum-decl name='nvlist_prtctl_fmt' id='c8dcc53a'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='NVLIST_FMT_MEMBER_NAME' value='0'/>
+ <enumerator name='NVLIST_FMT_MEMBER_POSTAMBLE' value='1'/>
+ <enumerator name='NVLIST_FMT_BTWN_ARRAY' value='2'/>
+ </enum-decl>
+ <pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
+ <pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
+ <pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
+ <pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
+ <pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ <pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <pointer-type-def type-id='9f88f76e' size-in-bits='64' id='7ef0e988'/>
+ <pointer-type-def type-id='c5bb1a2b' size-in-bits='64' id='c0d0f877'/>
+ <pointer-type-def type-id='573fea1b' size-in-bits='64' id='de20bf07'/>
+ <pointer-type-def type-id='70284cc6' size-in-bits='64' id='3be4d568'/>
+ <pointer-type-def type-id='700c3bca' size-in-bits='64' id='6d994334'/>
+ <pointer-type-def type-id='18ac1860' size-in-bits='64' id='506ab59a'/>
+ <pointer-type-def type-id='328fee42' size-in-bits='64' id='750cc41c'/>
+ <pointer-type-def type-id='7ba5cd31' size-in-bits='64' id='aaea91b5'/>
+ <pointer-type-def type-id='a86d8029' size-in-bits='64' id='42257af5'/>
+ <pointer-type-def type-id='0b4eb914' size-in-bits='64' id='19ea27ae'/>
+ <pointer-type-def type-id='c6c8144e' size-in-bits='64' id='2835af80'/>
+ <pointer-type-def type-id='20f7b475' size-in-bits='64' id='6a2f50c1'/>
+ <pointer-type-def type-id='102ee17a' size-in-bits='64' id='e1c54c3c'/>
+ <pointer-type-def type-id='49b69c77' size-in-bits='64' id='ea6be4eb'/>
+ <pointer-type-def type-id='cb5d50f1' size-in-bits='64' id='1708018d'/>
+ <pointer-type-def type-id='880d56b8' size-in-bits='64' id='d2af7f32'/>
+ <pointer-type-def type-id='a739bfc6' size-in-bits='64' id='506696a8'/>
+ <pointer-type-def type-id='234f35e8' size-in-bits='64' id='8a1fb33a'/>
+ <pointer-type-def type-id='41f7168a' size-in-bits='64' id='f10f1e84'/>
+ <pointer-type-def type-id='e8d6e508' size-in-bits='64' id='90174072'/>
+ <pointer-type-def type-id='f3daafe5' size-in-bits='64' id='0b22f759'/>
+ <pointer-type-def type-id='17ab04ad' size-in-bits='64' id='39b623f9'/>
+ <pointer-type-def type-id='256cdd75' size-in-bits='64' id='7391ed39'/>
+ <pointer-type-def type-id='cc10a041' size-in-bits='64' id='ed6a3a3d'/>
+ <pointer-type-def type-id='9fd269d3' size-in-bits='64' id='292cdbcf'/>
+ <pointer-type-def type-id='3bd73b0c' size-in-bits='64' id='7e85a9b6'/>
+ <pointer-type-def type-id='0d445e26' size-in-bits='64' id='330cc0d0'/>
+ <pointer-type-def type-id='e4b89f30' size-in-bits='64' id='ed8aa8ba'/>
+ <pointer-type-def type-id='be7f4941' size-in-bits='64' id='2809de35'/>
+ <pointer-type-def type-id='fe5ae69d' size-in-bits='64' id='90d5edb9'/>
+ <pointer-type-def type-id='2783af3c' size-in-bits='64' id='e44553b6'/>
+ <pointer-type-def type-id='33c6e3d8' size-in-bits='64' id='1263777a'/>
+ <pointer-type-def type-id='dadb9eca' size-in-bits='64' id='cbda43ac'/>
+ <pointer-type-def type-id='55b9e070' size-in-bits='64' id='b3fae562'/>
+ <pointer-type-def type-id='8e63c78b' size-in-bits='64' id='8b41e457'/>
+ <pointer-type-def type-id='c542ed33' size-in-bits='64' id='f9668a57'/>
+ <pointer-type-def type-id='5dea179a' size-in-bits='64' id='001d8764'/>
+ <pointer-type-def type-id='b6f659a0' size-in-bits='64' id='44f188f2'/>
+ <pointer-type-def type-id='2765bd17' size-in-bits='64' id='976f721b'/>
+ <pointer-type-def type-id='9e073b5c' size-in-bits='64' id='ee62ad8e'/>
+ <pointer-type-def type-id='2c785071' size-in-bits='64' id='957d9f35'/>
+ <pointer-type-def type-id='aad19bf7' size-in-bits='64' id='4db8acf3'/>
+ <pointer-type-def type-id='0660e71a' size-in-bits='64' id='0ca7b13c'/>
+ <pointer-type-def type-id='250287b8' size-in-bits='64' id='a91bad5a'/>
+ <pointer-type-def type-id='e7344862' size-in-bits='64' id='519bf35c'/>
+ <pointer-type-def type-id='32b6d968' size-in-bits='64' id='92988dea'/>
+ <pointer-type-def type-id='5c975642' size-in-bits='64' id='7f8ee7e4'/>
+ <pointer-type-def type-id='0155b993' size-in-bits='64' id='2c8c4457'/>
+ <pointer-type-def type-id='6e8b02cb' size-in-bits='64' id='eb944897'/>
+ <pointer-type-def type-id='d434b7d7' size-in-bits='64' id='108e6453'/>
+ <pointer-type-def type-id='c645e10f' size-in-bits='64' id='5cbe16ab'/>
+ <pointer-type-def type-id='de41f295' size-in-bits='64' id='d94cdfa1'/>
+ <pointer-type-def type-id='b2fbf64a' size-in-bits='64' id='470a7fd4'/>
+ <pointer-type-def type-id='cc22d314' size-in-bits='64' id='eddda806'/>
+ <pointer-type-def type-id='23bd8cb5' size-in-bits='64' id='f76f73d0'/>
+ <pointer-type-def type-id='3ff5601b' size-in-bits='64' id='4aafb922'/>
+ <pointer-type-def type-id='9da381c4' size-in-bits='64' id='cb785ebf'/>
+ <pointer-type-def type-id='ee31ee44' size-in-bits='64' id='256d5229'/>
+ <pointer-type-def type-id='ebc6735b' size-in-bits='64' id='7be54adb'/>
+ <pointer-type-def type-id='d2e8bad9' size-in-bits='64' id='196db161'/>
+ <pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
+ <pointer-type-def type-id='5ce45b60' size-in-bits='64' id='857bb57e'/>
+ <pointer-type-def type-id='57928edf' size-in-bits='64' id='3fa542f0'/>
+ <pointer-type-def type-id='aca3bac8' size-in-bits='64' id='d33f11cb'/>
+ <pointer-type-def type-id='d8bf0010' size-in-bits='64' id='45b65157'/>
+ <pointer-type-def type-id='149c6638' size-in-bits='64' id='8a121f49'/>
+ <pointer-type-def type-id='8f92235e' size-in-bits='64' id='90421557'/>
+ <pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
+ <pointer-type-def type-id='b96825af' size-in-bits='64' id='ae3e8ca6'/>
+ <pointer-type-def type-id='002ac4a6' size-in-bits='64' id='cf536864'/>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <function-decl name='nvpair_value_match' mangled-name='nvpair_value_match' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='95e97e5e' name='ai'/>
+ <parameter type-id='26a90f95' name='value'/>
+ <parameter type-id='9b23c9ad' name='ep'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='nvlist_lookup_byte' mangled-name='nvlist_lookup_byte' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvpair_value_match_regex' mangled-name='nvpair_value_match_regex' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvpair_value_match_regex'>
+ <parameter type-id='3fa542f0' name='nvp'/>
+ <parameter type-id='95e97e5e' name='ai'/>
+ <parameter type-id='26a90f95' name='value'/>
+ <parameter type-id='d33f11cb' name='value_regex'/>
+ <parameter type-id='9b23c9ad' name='ep'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='nvlist_lookup_boolean_value' mangled-name='nvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='dump_nvlist' mangled-name='dump_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dump_nvlist'>
+ <parameter type-id='5ce45b60' name='list'/>
+ <parameter type-id='95e97e5e' name='indent'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prt' mangled-name='nvlist_prt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prt'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_lookup_nvpair' mangled-name='nvlist_lookup_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print'>
+ <parameter type-id='822cd80b' name='fp'/>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_remove_nvpair' mangled-name='nvlist_remove_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_free' mangled-name='nvlist_prtctl_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_free'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_alloc' mangled-name='nvlist_prtctl_alloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_alloc'>
+ <return type-id='b0c1ff8d'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_nvlist_array' mangled-name='nvlist_prtctlop_nvlist_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='44f188f2' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_string_array' mangled-name='nvlist_prtctlop_string_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='90d5edb9' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint64_array' mangled-name='nvlist_prtctlop_uint64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='470a7fd4' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int64_array' mangled-name='nvlist_prtctlop_int64_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='8b41e457' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint32_array' mangled-name='nvlist_prtctlop_uint32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='d94cdfa1' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int32_array' mangled-name='nvlist_prtctlop_int32_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='b3fae562' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint16_array' mangled-name='nvlist_prtctlop_uint16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='5cbe16ab' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int16_array' mangled-name='nvlist_prtctlop_int16_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='cbda43ac' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint8_array' mangled-name='nvlist_prtctlop_uint8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='eddda806' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int8_array' mangled-name='nvlist_prtctlop_int8_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='f9668a57' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_byte_array' mangled-name='nvlist_prtctlop_byte_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='108e6453' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_boolean_array' mangled-name='nvlist_prtctlop_boolean_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_array'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='ed8aa8ba' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_nvpair' mangled-name='nvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_nvlist' mangled-name='nvlist_prtctlop_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_nvlist'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='001d8764' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_hrtime' mangled-name='nvlist_prtctlop_hrtime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_hrtime'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='ee62ad8e' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_string' mangled-name='nvlist_prtctlop_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_string'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='2809de35' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_double' mangled-name='nvlist_prtctlop_double' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_double'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='e44553b6' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint64' mangled-name='nvlist_prtctlop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint64'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='2c8c4457' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int64' mangled-name='nvlist_prtctlop_int64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int64'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='0ca7b13c' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint32' mangled-name='nvlist_prtctlop_uint32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint32'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='7f8ee7e4' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int32' mangled-name='nvlist_prtctlop_int32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int32'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='4db8acf3' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint16' mangled-name='nvlist_prtctlop_uint16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint16'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='92988dea' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int16' mangled-name='nvlist_prtctlop_int16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int16'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='957d9f35' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_uint8' mangled-name='nvlist_prtctlop_uint8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_uint8'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='eb944897' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_int8' mangled-name='nvlist_prtctlop_int8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_int8'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='a91bad5a' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_byte' mangled-name='nvlist_prtctlop_byte' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_byte'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='519bf35c' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_boolean_value' mangled-name='nvlist_prtctlop_boolean_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean_value'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='976f721b' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_merge' mangled-name='nvlist_merge' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctlop_boolean' mangled-name='nvlist_prtctlop_boolean' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctlop_boolean'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='1263777a' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_dofmt' mangled-name='nvlist_prtctl_dofmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_dofmt'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='c8dcc53a' name='which'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_setfmt' mangled-name='nvlist_prtctl_setfmt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setfmt'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='c8dcc53a' name='which'/>
+ <parameter type-id='80f4b756' name='fmt'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_doindent' mangled-name='nvlist_prtctl_doindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_doindent'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='95e97e5e' name='onemore'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_setindent' mangled-name='nvlist_prtctl_setindent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setindent'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='628aafab' name='mode'/>
+ <parameter type-id='95e97e5e' name='start'/>
+ <parameter type-id='95e97e5e' name='inc'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_getdest' mangled-name='nvlist_prtctl_getdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_getdest'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <return type-id='822cd80b'/>
</function-decl>
- <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <function-decl name='nvlist_prtctl_setdest' mangled-name='nvlist_prtctl_setdest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_prtctl_setdest'>
+ <parameter type-id='b0c1ff8d' name='pctl'/>
+ <parameter type-id='822cd80b' name='fp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='9f88f76e'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='37e3bd22'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='c5bb1a2b'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='26a90f95'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='573fea1b'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9b23c9ad'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='70284cc6'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='a0eb0f08'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='700c3bca'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='95e97e5e'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='18ac1860'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='f76f73d0'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='328fee42'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='4aafb922'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='7ba5cd31'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='cb785ebf'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='a86d8029'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='256d5229'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='0b4eb914'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='5ce45b60'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='c6c8144e'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='857bb57e'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='20f7b475'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='c19b74c3'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='102ee17a'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='cebdd548'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='49b69c77'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='23bd8cb5'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='cb5d50f1'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='3ff5601b'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='880d56b8'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9da381c4'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='a739bfc6'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='ee31ee44'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='234f35e8'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='d8bf0010'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='41f7168a'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='149c6638'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='e8d6e508'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='8f92235e'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='f3daafe5'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9c313c2d'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='17ab04ad'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='b96825af'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='256cdd75'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='45b65157'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='cc10a041'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='8a121f49'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='9fd269d3'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='90421557'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='3bd73b0c'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='5d6479ae'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='0d445e26'>
+ <parameter type-id='196db161'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='ae3e8ca6'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='e4b89f30'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='37e3bd22'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='be7f4941'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='26a90f95'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='fe5ae69d'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9b23c9ad'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='2783af3c'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='a0eb0f08'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='33c6e3d8'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='95e97e5e'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='dadb9eca'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='f76f73d0'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='55b9e070'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='4aafb922'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='8e63c78b'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='cb785ebf'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='c542ed33'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='256d5229'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='5dea179a'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='5ce45b60'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='b6f659a0'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='857bb57e'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='2765bd17'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='c19b74c3'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='9e073b5c'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='cebdd548'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='2c785071'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='23bd8cb5'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='aad19bf7'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='3ff5601b'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='0660e71a'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9da381c4'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='250287b8'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='ee31ee44'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='e7344862'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='d8bf0010'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='32b6d968'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='149c6638'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='5c975642'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='8f92235e'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='0155b993'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9c313c2d'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='6e8b02cb'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='b96825af'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='d434b7d7'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='45b65157'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='c645e10f'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='8a121f49'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='de41f295'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='90421557'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='b2fbf64a'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='5d6479ae'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='cc22d314'>
+ <parameter type-id='b0c1ff8d'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='ae3e8ca6'/>
+ <parameter type-id='3502e3ff'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libnvpair_json.c' language='LANG_C99'>
+ <function-decl name='nvlist_print_json' mangled-name='nvlist_print_json' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='nvlist_print_json'>
+ <parameter type-id='822cd80b' name='fp'/>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <var-decl name='libspl_assert_ok' type-id='type-id-1' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
+ <abi-instr version='1.0' address-size='64' path='nvpair_alloc_system.c' language='LANG_C99'>
+ <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='d5027220'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='gp_offset' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='fp_offset' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='overflow_arg_area' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='reg_save_area' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='nv_alloc' size-in-bits='128' is-struct='yes' visibility='default' id='98213087'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nva_ops' type-id='ee1d4944' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nva_arg' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='nv_alloc_ops_t' type-id='8f6cc4f4' id='03e8ffd6'/>
+ <class-decl name='nv_alloc_ops' size-in-bits='320' is-struct='yes' visibility='default' id='8f6cc4f4'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nv_ao_init' type-id='76da8447' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nv_ao_fini' type-id='fe356f6f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nv_ao_alloc' type-id='9ff7f508' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='nv_ao_free' type-id='520da3f4' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='nv_ao_reset' type-id='fe356f6f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='nv_alloc_t' type-id='98213087' id='cca08635'/>
+ <pointer-type-def type-id='d5027220' size-in-bits='64' id='b7f2d5e6'/>
+ <qualified-type-def type-id='03e8ffd6' const='yes' id='aca16c06'/>
+ <pointer-type-def type-id='aca16c06' size-in-bits='64' id='ee1d4944'/>
+ <pointer-type-def type-id='e9ff7293' size-in-bits='64' id='76da8447'/>
+ <pointer-type-def type-id='cca08635' size-in-bits='64' id='11871392'/>
+ <pointer-type-def type-id='51a21b4b' size-in-bits='64' id='fe356f6f'/>
+ <pointer-type-def type-id='1169c032' size-in-bits='64' id='520da3f4'/>
+ <pointer-type-def type-id='9fff962e' size-in-bits='64' id='9ff7f508'/>
+ <var-decl name='nv_alloc_nosleep' type-id='11871392' mangled-name='nv_alloc_nosleep' visibility='default' elf-symbol-id='nv_alloc_nosleep'/>
+ <function-type size-in-bits='64' id='e9ff7293'>
+ <parameter type-id='11871392'/>
+ <parameter type-id='b7f2d5e6'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='1169c032'>
+ <parameter type-id='11871392'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='b59d7dce'/>
+ <return type-id='48b5725f'/>
+ </function-type>
+ <function-type size-in-bits='64' id='9fff962e'>
+ <parameter type-id='11871392'/>
+ <parameter type-id='b59d7dce'/>
+ <return type-id='eaa32e2f'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='assert.c' language='LANG_C99'>
+ <var-decl name='libspl_assert_ok' type-id='95e97e5e' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
<function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
- <parameter type-id='type-id-36' name='file'/>
- <parameter type-id='type-id-36' name='func'/>
- <parameter type-id='type-id-1' name='line'/>
- <parameter type-id='type-id-36' name='format'/>
+ <parameter type-id='80f4b756' name='file'/>
+ <parameter type-id='80f4b756' name='func'/>
+ <parameter type-id='95e97e5e' name='line'/>
+ <parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
- </function-decl>
- <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-25'/>
+ <return type-id='48b5725f'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libspl/include/sys/feature_tests.h b/sys/contrib/openzfs/lib/libspl/include/sys/feature_tests.h
index 1a68b75f0cdc..c9564b2c3269 100644
--- a/sys/contrib/openzfs/lib/libspl/include/sys/feature_tests.h
+++ b/sys/contrib/openzfs/lib/libspl/include/sys/feature_tests.h
@@ -1,32 +1,41 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2006 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
#ifndef _SYS_FEATURE_TESTS_H
#define _SYS_FEATURE_TESTS_H
-#define __NORETURN __attribute__((__noreturn__))
+#define ____cacheline_aligned
+#define __NORETURN __attribute__((__noreturn__))
+
+#if !defined(fallthrough) && !defined(_LIBCPP_VERSION)
+#if defined(HAVE_IMPLICIT_FALLTHROUGH)
+#define fallthrough __attribute__((__fallthrough__))
+#else
+#define fallthrough ((void)0)
+#endif
+#endif
#endif
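
The hunk above adds a userspace fallthrough macro to libspl's feature_tests.h: it expands to __attribute__((__fallthrough__)) when the build detects compiler support (HAVE_IMPLICIT_FALLTHROUGH) and to a harmless no-op otherwise, alongside an empty ____cacheline_aligned stub. The sketch below is illustrative only and is not part of the patch; it shows the kind of switch-case annotation such a macro enables, using a __has_attribute probe as a stand-in for the configure-time HAVE_IMPLICIT_FALLTHROUGH check.

	/*
	 * Illustrative sketch (not from the patch): annotate an intentional
	 * switch-case fall-through so -Wimplicit-fallthrough stays quiet.
	 */
	#include <stdio.h>

	#if defined(__has_attribute)
	#if __has_attribute(__fallthrough__)
	#define	fallthrough	__attribute__((__fallthrough__))
	#endif
	#endif
	#ifndef fallthrough
	#define	fallthrough	((void)0)
	#endif

	static void
	describe(int n)
	{
		switch (n) {
		case 0:
			printf("zero ");
			fallthrough;	/* intentional: also print "small" */
		case 1:
			printf("small\n");
			break;
		default:
			printf("other\n");
			break;
		}
	}

	int
	main(void)
	{
		describe(0);	/* prints "zero small" */
		describe(5);	/* prints "other" */
		return (0);
	}
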
diff --git a/sys/contrib/openzfs/lib/libuutil/libuutil.abi b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
index 7737b10fa44d..d61416d5b99b 100644
--- a/sys/contrib/openzfs/lib/libuutil/libuutil.abi
+++ b/sys/contrib/openzfs/lib/libuutil/libuutil.abi
@@ -1,2087 +1,1905 @@
<abi-corpus architecture='elf-amd-x86_64' soname='libuutil.so.3'>
<elf-needed>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
<dependency name='ld-linux-x86-64.so.2'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_sol_getmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_clear_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_set_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy_nodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert_here' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_nearest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_swap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getzoneid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_active' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_replace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_move_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_consumer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_enter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_producer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mkdirp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='print_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spl_pagesize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcpy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_alt_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_lockup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_nearest_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_nearest_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_node_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_node_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_pool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_pool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_teardown' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_avl_walk_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_check_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_die' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_fatal' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_ok' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_usage' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_getpname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_lockup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_nearest_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_nearest_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_node_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_node_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_pool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_pool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_teardown' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_list_walk_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_memdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_msprintf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_panic' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_set_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_setpname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strbw' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strcaseeq' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strdup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_streq' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strerror' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_strndup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vwarn' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_vxdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_warn' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_xdie' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_zalloc' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='libspl_assert_ok' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_fatal_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_ok_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='uu_exit_usage_value' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='uu_alloc.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
- <type-decl name='char' size-in-bits='8' id='type-id-1'/>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-2'/>
- <qualified-type-def type-id='type-id-1' const='yes' id='type-id-3'/>
- <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-4'/>
- <function-decl name='uu_msprintf' mangled-name='uu_msprintf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_msprintf'>
- <parameter type-id='type-id-4' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <type-decl name='void' id='type-id-5'/>
- <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-6'/>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-7'/>
- <typedef-decl name='size_t' type-id='type-id-7' id='type-id-8'/>
- <function-decl name='uu_memdup' mangled-name='uu_memdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_memdup'>
- <parameter type-id='type-id-6' name='buf'/>
- <parameter type-id='type-id-8' name='sz'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_strndup' mangled-name='uu_strndup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strndup'>
- <parameter type-id='type-id-4' name='s'/>
- <parameter type-id='type-id-8' name='n'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_strdup' mangled-name='uu_strdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strdup'>
- <parameter type-id='type-id-4' name='str'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='uu_free' mangled-name='uu_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_free'>
- <parameter type-id='type-id-6' name='p'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_zalloc' mangled-name='uu_zalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_zalloc'>
- <parameter type-id='type-id-8' name='n'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='__builtin___vsnprintf_chk' mangled-name='__vsnprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__builtin_memcpy' mangled-name='memcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='strnlen' mangled-name='strnlen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__builtin_calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_set_error' mangled-name='uu_set_error' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_avl.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
- <function-decl name='uu_avl_release' mangled-name='uu_avl_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_release'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_avl_lockup' mangled-name='uu_avl_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_lockup'>
- <return type-id='type-id-5'/>
- </function-decl>
- <class-decl name='uu_avl' size-in-bits='960' is-struct='yes' visibility='default' id='type-id-9'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ua_next_enc' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ua_prev_enc' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ua_pool' type-id='type-id-11' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='ua_parent_enc' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ua_debug' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='264'>
- <var-decl name='ua_index' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='ua_tree' type-id='type-id-13' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='ua_null_walk' type-id='type-id-14' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='uintptr_t' type-id='type-id-7' id='type-id-10'/>
- <class-decl name='uu_avl_pool' size-in-bits='2176' is-struct='yes' visibility='default' id='type-id-15'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uap_next' type-id='type-id-11' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='uap_prev' type-id='type-id-11' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='uap_name' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='uap_nodeoffset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='uap_objsize' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='uap_cmp' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='uap_debug' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='840'>
- <var-decl name='uap_last_index' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='uap_lock' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='uap_null_avl' type-id='type-id-19' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='uu_avl_pool_t' type-id='type-id-15' id='type-id-20'/>
- <pointer-type-def type-id='type-id-20' size-in-bits='64' id='type-id-11'/>
-
- <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='512' id='type-id-16'>
- <subrange length='64' type-id='type-id-7' id='type-id-21'/>
-
- </array-type-def>
- <type-decl name='int' size-in-bits='32' id='type-id-22'/>
- <typedef-decl name='uu_compare_fn_t' type-id='type-id-23' id='type-id-24'/>
- <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-17'/>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-25'/>
- <typedef-decl name='__uint8_t' type-id='type-id-25' id='type-id-26'/>
- <typedef-decl name='uint8_t' type-id='type-id-26' id='type-id-12'/>
- <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-27'>
- <data-member access='private'>
- <var-decl name='__data' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__size' type-id='type-id-29' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='__align' type-id='type-id-30' visibility='default'/>
- </data-member>
- </union-decl>
- <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-28'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__lock' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__count' type-id='type-id-31' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__owner' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__nusers' type-id='type-id-31' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__kind' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='__spins' type-id='type-id-32' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='176'>
- <var-decl name='__elision' type-id='type-id-32' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__list' type-id='type-id-33' visibility='default'/>
- </data-member>
- </class-decl>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-31'/>
- <type-decl name='short int' size-in-bits='16' id='type-id-32'/>
- <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-34'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__prev' type-id='type-id-35' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__next' type-id='type-id-35' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-34' size-in-bits='64' id='type-id-35'/>
- <typedef-decl name='__pthread_list_t' type-id='type-id-34' id='type-id-33'/>
-
- <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='320' id='type-id-29'>
- <subrange length='40' type-id='type-id-7' id='type-id-36'/>
-
- </array-type-def>
- <type-decl name='long int' size-in-bits='64' id='type-id-30'/>
- <typedef-decl name='pthread_mutex_t' type-id='type-id-27' id='type-id-18'/>
- <typedef-decl name='uu_avl_t' type-id='type-id-9' id='type-id-19'/>
- <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-13'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_root' type-id='type-id-37' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='avl_compar' type-id='type-id-38' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_offset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='avl_numnodes' type-id='type-id-39' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='avl_pad' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-40'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_child' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_pcb' type-id='type-id-10' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-40' size-in-bits='64' id='type-id-37'/>
-
- <array-type-def dimensions='1' type-id='type-id-37' size-in-bits='128' id='type-id-41'>
- <subrange length='2' type-id='type-id-7' id='type-id-42'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-43' size-in-bits='64' id='type-id-38'/>
- <typedef-decl name='ulong_t' type-id='type-id-7' id='type-id-39'/>
- <class-decl name='uu_avl_walk' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-44'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uaw_next' type-id='type-id-45' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='uaw_prev' type-id='type-id-45' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='uaw_avl' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='uaw_next_result' type-id='type-id-6' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='uaw_dir' type-id='type-id-47' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='264'>
- <var-decl name='uaw_robust' type-id='type-id-12' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='uu_avl_walk_t' type-id='type-id-44' id='type-id-14'/>
- <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-45'/>
- <pointer-type-def type-id='type-id-19' size-in-bits='64' id='type-id-46'/>
- <type-decl name='signed char' size-in-bits='8' id='type-id-48'/>
- <typedef-decl name='__int8_t' type-id='type-id-48' id='type-id-49'/>
- <typedef-decl name='int8_t' type-id='type-id-49' id='type-id-47'/>
- <typedef-decl name='uu_avl_index_t' type-id='type-id-10' id='type-id-50'/>
- <function-decl name='uu_avl_nearest_prev' mangled-name='uu_avl_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_prev'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-50' name='idx'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_nearest_next' mangled-name='uu_avl_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_next'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-50' name='idx'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_insert' mangled-name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_insert'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-6' name='elem'/>
- <parameter type-id='type-id-50' name='idx'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <pointer-type-def type-id='type-id-50' size-in-bits='64' id='type-id-51'/>
- <function-decl name='uu_avl_find' mangled-name='uu_avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_find'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-6' name='elem'/>
- <parameter type-id='type-id-6' name='private'/>
- <parameter type-id='type-id-51' name='out'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <pointer-type-def type-id='type-id-6' size-in-bits='64' id='type-id-52'/>
- <function-decl name='uu_avl_teardown' mangled-name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_teardown'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-52' name='cookie'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_remove' mangled-name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_remove'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-6' name='elem'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <typedef-decl name='uu_walk_fn_t' type-id='type-id-43' id='type-id-53'/>
- <pointer-type-def type-id='type-id-53' size-in-bits='64' id='type-id-54'/>
- <typedef-decl name='__uint32_t' type-id='type-id-31' id='type-id-55'/>
- <typedef-decl name='uint32_t' type-id='type-id-55' id='type-id-56'/>
- <function-decl name='uu_avl_walk' mangled-name='uu_avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-54' name='func'/>
- <parameter type-id='type-id-6' name='private'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-22'/>
- </function-decl>
- <function-decl name='uu_avl_walk_end' mangled-name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_end'>
- <parameter type-id='type-id-45' name='wp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_avl_walk_next' mangled-name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_next'>
- <parameter type-id='type-id-45' name='wp'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_walk_start' mangled-name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_start'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-45'/>
- </function-decl>
- <function-decl name='uu_avl_prev' mangled-name='uu_avl_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_prev'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-6' name='node'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_next' mangled-name='uu_avl_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_next'>
- <parameter type-id='type-id-46' name='ap'/>
- <parameter type-id='type-id-6' name='node'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_last' mangled-name='uu_avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_last'>
- <parameter type-id='type-id-46' name='ap'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_first' mangled-name='uu_avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_first'>
- <parameter type-id='type-id-46' name='ap'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_avl_numnodes' mangled-name='uu_avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_numnodes'>
- <parameter type-id='type-id-46' name='ap'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='uu_avl_destroy' mangled-name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_destroy'>
- <parameter type-id='type-id-46' name='ap'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_avl_create' mangled-name='uu_avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_create'>
- <parameter type-id='type-id-11' name='pp'/>
- <parameter type-id='type-id-6' name='parent'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-46'/>
- </function-decl>
- <class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-57'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uan_opaque' type-id='type-id-58' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-10' size-in-bits='192' id='type-id-58'>
- <subrange length='3' type-id='type-id-7' id='type-id-59'/>
-
- </array-type-def>
- <typedef-decl name='uu_avl_node_t' type-id='type-id-57' id='type-id-60'/>
- <pointer-type-def type-id='type-id-60' size-in-bits='64' id='type-id-61'/>
- <function-decl name='uu_avl_node_fini' mangled-name='uu_avl_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_fini'>
- <parameter type-id='type-id-6' name='base'/>
- <parameter type-id='type-id-61' name='np'/>
- <parameter type-id='type-id-11' name='pp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_avl_node_init' mangled-name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_init'>
- <parameter type-id='type-id-6' name='base'/>
- <parameter type-id='type-id-61' name='np'/>
- <parameter type-id='type-id-11' name='pp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_avl_pool_destroy' mangled-name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_destroy'>
- <parameter type-id='type-id-11' name='pp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_avl_pool_create' mangled-name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_create'>
- <parameter type-id='type-id-4' name='name'/>
- <parameter type-id='type-id-8' name='objsize'/>
- <parameter type-id='type-id-8' name='nodeoffset'/>
- <parameter type-id='type-id-17' name='compare_func'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-11'/>
- </function-decl>
- <function-decl name='pthread_mutex_unlock' mangled-name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_mutex_lock' mangled-name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_panic' mangled-name='uu_panic' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_free' mangled-name='uu_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_zalloc' mangled-name='uu_zalloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_mutex_destroy' mangled-name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_check_name' mangled-name='uu_check_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_mutex_init' mangled-name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-43'>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-22'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-23'>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <parameter type-id='type-id-6'/>
- <return type-id='type-id-22'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_ident.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
- <typedef-decl name='uint_t' type-id='type-id-31' id='type-id-62'/>
- <function-decl name='uu_check_name' mangled-name='uu_check_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_check_name'>
- <parameter type-id='type-id-4' name='name'/>
- <parameter type-id='type-id-62' name='flags'/>
- <return type-id='type-id-22'/>
- </function-decl>
- <function-decl name='strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_list.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
- <function-decl name='uu_list_release' mangled-name='uu_list_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_release'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_lockup' mangled-name='uu_list_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_lockup'>
- <return type-id='type-id-5'/>
- </function-decl>
- <class-decl name='uu_list' size-in-bits='896' is-struct='yes' visibility='default' id='type-id-63'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ul_next_enc' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ul_prev_enc' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ul_pool' type-id='type-id-64' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='ul_parent_enc' type-id='type-id-10' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ul_offset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='ul_numnodes' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='ul_debug' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='392'>
- <var-decl name='ul_sorted' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='400'>
- <var-decl name='ul_index' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='ul_null_node' type-id='type-id-65' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='ul_null_walk' type-id='type-id-66' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='uu_list_pool' size-in-bits='2112' is-struct='yes' visibility='default' id='type-id-67'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ulp_next' type-id='type-id-64' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ulp_prev' type-id='type-id-64' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ulp_name' type-id='type-id-16' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='ulp_nodeoffset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='ulp_objsize' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='ulp_cmp' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='ulp_debug' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='840'>
- <var-decl name='ulp_last_index' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='ulp_lock' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='ulp_null_list' type-id='type-id-68' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='uu_list_pool_t' type-id='type-id-67' id='type-id-69'/>
- <pointer-type-def type-id='type-id-69' size-in-bits='64' id='type-id-64'/>
- <typedef-decl name='uu_list_t' type-id='type-id-63' id='type-id-68'/>
- <class-decl name='uu_list_node_impl' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-70'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uln_next' type-id='type-id-71' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='uln_prev' type-id='type-id-71' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-70' size-in-bits='64' id='type-id-71'/>
- <typedef-decl name='uu_list_node_impl_t' type-id='type-id-70' id='type-id-65'/>
- <class-decl name='uu_list_walk' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-72'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ulw_next' type-id='type-id-73' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='ulw_prev' type-id='type-id-73' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='ulw_list' type-id='type-id-74' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='ulw_dir' type-id='type-id-47' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='ulw_robust' type-id='type-id-12' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ulw_next_result' type-id='type-id-75' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='uu_list_walk_t' type-id='type-id-72' id='type-id-66'/>
- <pointer-type-def type-id='type-id-66' size-in-bits='64' id='type-id-73'/>
- <pointer-type-def type-id='type-id-68' size-in-bits='64' id='type-id-74'/>
- <pointer-type-def type-id='type-id-65' size-in-bits='64' id='type-id-75'/>
- <function-decl name='uu_list_prev' mangled-name='uu_list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_prev'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-6' name='elem'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_next' mangled-name='uu_list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_next'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-6' name='elem'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_last' mangled-name='uu_list_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_last'>
- <parameter type-id='type-id-74' name='lp'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_first' mangled-name='uu_list_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_first'>
- <parameter type-id='type-id-74' name='lp'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_numnodes' mangled-name='uu_list_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_numnodes'>
- <parameter type-id='type-id-74' name='lp'/>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='uu_list_insert_after' mangled-name='uu_list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_after'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-6' name='target'/>
- <parameter type-id='type-id-6' name='elem'/>
- <return type-id='type-id-22'/>
- </function-decl>
- <function-decl name='uu_list_insert_before' mangled-name='uu_list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_before'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-6' name='target'/>
- <parameter type-id='type-id-6' name='elem'/>
- <return type-id='type-id-22'/>
- </function-decl>
- <function-decl name='uu_list_teardown' mangled-name='uu_list_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_teardown'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-52' name='cookie'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_remove' mangled-name='uu_list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_remove'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-6' name='elem'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_walk' mangled-name='uu_list_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-54' name='func'/>
- <parameter type-id='type-id-6' name='private'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-22'/>
- </function-decl>
- <function-decl name='uu_list_walk_end' mangled-name='uu_list_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_end'>
- <parameter type-id='type-id-73' name='wp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_walk_next' mangled-name='uu_list_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_next'>
- <parameter type-id='type-id-73' name='wp'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_walk_start' mangled-name='uu_list_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_start'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-73'/>
- </function-decl>
- <typedef-decl name='uu_list_index_t' type-id='type-id-10' id='type-id-76'/>
- <function-decl name='uu_list_nearest_prev' mangled-name='uu_list_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_prev'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-76' name='idx'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_nearest_next' mangled-name='uu_list_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_next'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-76' name='idx'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <pointer-type-def type-id='type-id-76' size-in-bits='64' id='type-id-77'/>
- <function-decl name='uu_list_find' mangled-name='uu_list_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_find'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-6' name='elem'/>
- <parameter type-id='type-id-6' name='private'/>
- <parameter type-id='type-id-77' name='out'/>
- <return type-id='type-id-6'/>
- </function-decl>
- <function-decl name='uu_list_insert' mangled-name='uu_list_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert'>
- <parameter type-id='type-id-74' name='lp'/>
- <parameter type-id='type-id-6' name='elem'/>
- <parameter type-id='type-id-76' name='idx'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_destroy' mangled-name='uu_list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_destroy'>
- <parameter type-id='type-id-74' name='lp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_create' mangled-name='uu_list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_create'>
- <parameter type-id='type-id-64' name='pp'/>
- <parameter type-id='type-id-6' name='parent'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-74'/>
- </function-decl>
- <class-decl name='uu_list_node' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-78'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='uln_opaque' type-id='type-id-79' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-10' size-in-bits='128' id='type-id-79'>
- <subrange length='2' type-id='type-id-7' id='type-id-42'/>
-
- </array-type-def>
- <typedef-decl name='uu_list_node_t' type-id='type-id-78' id='type-id-80'/>
- <pointer-type-def type-id='type-id-80' size-in-bits='64' id='type-id-81'/>
- <function-decl name='uu_list_node_fini' mangled-name='uu_list_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_fini'>
- <parameter type-id='type-id-6' name='base'/>
- <parameter type-id='type-id-81' name='np_arg'/>
- <parameter type-id='type-id-64' name='pp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_node_init' mangled-name='uu_list_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_init'>
- <parameter type-id='type-id-6' name='base'/>
- <parameter type-id='type-id-81' name='np_arg'/>
- <parameter type-id='type-id-64' name='pp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_pool_destroy' mangled-name='uu_list_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_destroy'>
- <parameter type-id='type-id-64' name='pp'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_list_pool_create' mangled-name='uu_list_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_create'>
- <parameter type-id='type-id-4' name='name'/>
- <parameter type-id='type-id-8' name='objsize'/>
- <parameter type-id='type-id-8' name='nodeoffset'/>
- <parameter type-id='type-id-17' name='compare_func'/>
- <parameter type-id='type-id-56' name='flags'/>
- <return type-id='type-id-64'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_misc.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
- <function-decl name='uu_panic' mangled-name='uu_panic' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_panic'>
- <parameter type-id='type-id-4' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_strerror' mangled-name='uu_strerror' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strerror'>
- <parameter type-id='type-id-56' name='code'/>
- <return type-id='type-id-4'/>
- </function-decl>
- <function-decl name='uu_error' mangled-name='uu_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_error'>
- <return type-id='type-id-56'/>
- </function-decl>
- <function-decl name='uu_set_error' mangled-name='uu_set_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_set_error'>
- <parameter type-id='type-id-62' name='code'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_atfork' mangled-name='pthread_atfork' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_self' mangled-name='pthread_self' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pause' mangled-name='pause' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__errno_location' mangled-name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_getspecific' mangled-name='pthread_getspecific' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_setspecific' mangled-name='pthread_setspecific' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='pthread_key_create' mangled-name='pthread_key_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_pname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
- <var-decl name='uu_exit_ok_value' type-id='type-id-22' mangled-name='uu_exit_ok_value' visibility='default' elf-symbol-id='uu_exit_ok_value'/>
- <var-decl name='uu_exit_fatal_value' type-id='type-id-22' mangled-name='uu_exit_fatal_value' visibility='default' elf-symbol-id='uu_exit_fatal_value'/>
- <var-decl name='uu_exit_usage_value' type-id='type-id-22' mangled-name='uu_exit_usage_value' visibility='default' elf-symbol-id='uu_exit_usage_value'/>
- <function-decl name='uu_getpname' mangled-name='uu_getpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_getpname'>
- <return type-id='type-id-4'/>
- </function-decl>
- <function-decl name='uu_setpname' mangled-name='uu_setpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_setpname'>
- <parameter type-id='type-id-2' name='arg0'/>
- <return type-id='type-id-4'/>
- </function-decl>
- <function-decl name='uu_xdie' mangled-name='uu_xdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_xdie'>
- <parameter type-id='type-id-22' name='status'/>
- <parameter type-id='type-id-4' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-82'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='gp_offset' type-id='type-id-31' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='fp_offset' type-id='type-id-31' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='overflow_arg_area' type-id='type-id-6' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='reg_save_area' type-id='type-id-6' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-82' size-in-bits='64' id='type-id-83'/>
- <function-decl name='uu_vxdie' mangled-name='uu_vxdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vxdie'>
- <parameter type-id='type-id-22' name='status'/>
- <parameter type-id='type-id-4' name='format'/>
- <parameter type-id='type-id-83' name='alist'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_die' mangled-name='uu_die' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_die'>
- <parameter type-id='type-id-4' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_vdie' mangled-name='uu_vdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vdie'>
- <parameter type-id='type-id-4' name='format'/>
- <parameter type-id='type-id-83' name='alist'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_warn' mangled-name='uu_warn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_warn'>
- <parameter type-id='type-id-4' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_vwarn' mangled-name='uu_vwarn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vwarn'>
- <parameter type-id='type-id-4' name='format'/>
- <parameter type-id='type-id-83' name='alist'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='uu_alt_exit' mangled-name='uu_alt_exit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_alt_exit'>
- <parameter type-id='type-id-22' name='profile'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-84'/>
- <function-decl name='uu_exit_usage' mangled-name='uu_exit_usage' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_usage'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='uu_exit_fatal' mangled-name='uu_exit_fatal' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_fatal'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='uu_exit_ok' mangled-name='uu_exit_ok' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_ok'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strrchr' mangled-name='strrchr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='exit' mangled-name='exit' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='strerror' mangled-name='strerror' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='uu_string.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libuutil' language='LANG_C99'>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-85'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-86'>
- <underlying-type type-id='type-id-85'/>
- <enumerator name='B_FALSE' value='0'/>
- <enumerator name='B_TRUE' value='1'/>
- </enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-86' id='type-id-87'/>
- <function-decl name='uu_strbw' mangled-name='uu_strbw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strbw'>
- <parameter type-id='type-id-4' name='a'/>
- <parameter type-id='type-id-4' name='b'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <function-decl name='uu_strcaseeq' mangled-name='uu_strcaseeq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strcaseeq'>
- <parameter type-id='type-id-4' name='a'/>
- <parameter type-id='type-id-4' name='b'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <function-decl name='uu_streq' mangled-name='uu_streq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_streq'>
- <parameter type-id='type-id-4' name='a'/>
- <parameter type-id='type-id-4' name='b'/>
- <return type-id='type-id-87'/>
- </function-decl>
- <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='strcasecmp' mangled-name='strcasecmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libavl' language='LANG_C99'>
- <typedef-decl name='avl_tree_t' type-id='type-id-13' id='type-id-88'/>
- <pointer-type-def type-id='type-id-88' size-in-bits='64' id='type-id-89'/>
+ <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' language='LANG_C99'>
+ <typedef-decl name='avl_tree_t' type-id='b351119f' id='f20fbd51'/>
+ <typedef-decl name='avl_index_t' type-id='e475ab95' id='fba6cb51'/>
+ <pointer-type-def type-id='fba6cb51' size-in-bits='64' id='32adbf30'/>
+ <pointer-type-def type-id='f20fbd51' size-in-bits='64' id='a3681dea'/>
<function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-52' name='cookie'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='63e171df' name='cookie'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
- <parameter type-id='type-id-89' name='tree'/>
- <return type-id='type-id-87'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
- <parameter type-id='type-id-89' name='tree'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='ee1f298e'/>
</function-decl>
<function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
- <parameter type-id='type-id-89' name='tree'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-38' name='compar'/>
- <parameter type-id='type-id-8' name='size'/>
- <parameter type-id='type-id-8' name='offset'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='585e1de9' name='compar'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='b59d7dce' name='offset'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
- <parameter type-id='type-id-89' name='tree1'/>
- <parameter type-id='type-id-89' name='tree2'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='a3681dea' name='tree1'/>
+ <parameter type-id='a3681dea' name='tree2'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
- <parameter type-id='type-id-89' name='t'/>
- <parameter type-id='type-id-6' name='obj'/>
- <return type-id='type-id-87'/>
+ <parameter type-id='a3681dea' name='t'/>
+ <parameter type-id='eaa32e2f' name='obj'/>
+ <return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
- <parameter type-id='type-id-89' name='t'/>
- <parameter type-id='type-id-6' name='obj'/>
- <return type-id='type-id-87'/>
+ <parameter type-id='a3681dea' name='t'/>
+ <parameter type-id='eaa32e2f' name='obj'/>
+ <return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
- <parameter type-id='type-id-89' name='t'/>
- <parameter type-id='type-id-6' name='obj'/>
- <return type-id='type-id-87'/>
+ <parameter type-id='a3681dea' name='t'/>
+ <parameter type-id='eaa32e2f' name='obj'/>
+ <return type-id='c19b74c3'/>
</function-decl>
<function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-6' name='data'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-6' name='new_node'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='new_node'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-6' name='new_data'/>
- <parameter type-id='type-id-6' name='here'/>
- <parameter type-id='type-id-22' name='direction'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='new_data'/>
+ <parameter type-id='eaa32e2f' name='here'/>
+ <parameter type-id='95e97e5e' name='direction'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <typedef-decl name='avl_index_t' type-id='type-id-10' id='type-id-90'/>
<function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-6' name='new_data'/>
- <parameter type-id='type-id-90' name='where'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='new_data'/>
+ <parameter type-id='fba6cb51' name='where'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <pointer-type-def type-id='type-id-90' size-in-bits='64' id='type-id-91'/>
<function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-6' name='value'/>
- <parameter type-id='type-id-91' name='where'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='value'/>
+ <parameter type-id='32adbf30' name='where'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-90' name='where'/>
- <parameter type-id='type-id-22' name='direction'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='fba6cb51' name='where'/>
+ <parameter type-id='95e97e5e' name='direction'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
- <parameter type-id='type-id-89' name='tree'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
- <parameter type-id='type-id-89' name='tree'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
- <parameter type-id='type-id-89' name='tree'/>
- <parameter type-id='type-id-6' name='oldnode'/>
- <parameter type-id='type-id-22' name='left'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='oldnode'/>
+ <parameter type-id='95e97e5e' name='left'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='avl_root' type-id='bf311473' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='avl_pad' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
+ <typedef-decl name='boolean_t' type-id='08f5ca17' id='c19b74c3'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
+ <typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
+ <type-decl name='void' id='48b5725f'/>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
+ <pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='assert.c' language='LANG_C99'>
+ <var-decl name='libspl_assert_ok' type-id='95e97e5e' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
+ <parameter type-id='80f4b756' name='file'/>
+ <parameter type-id='80f4b756' name='func'/>
+ <parameter type-id='95e97e5e' name='line'/>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='48b5725f'/>
</function-decl>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='atomic.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='atomic.c' language='LANG_C99'>
+ <type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
+ <typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
+ <typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
+ <typedef-decl name='ssize_t' type-id='41060289' id='79a0948f'/>
+ <typedef-decl name='__ssize_t' type-id='bd54fe1a' id='41060289'/>
+ <typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
+ <typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
+ <typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
+ <typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
+ <qualified-type-def type-id='149c6638' volatile='yes' id='5120c5f7'/>
+ <pointer-type-def type-id='5120c5f7' size-in-bits='64' id='93977ae7'/>
+ <qualified-type-def type-id='8f92235e' volatile='yes' id='430e0681'/>
+ <pointer-type-def type-id='430e0681' size-in-bits='64' id='3a147f31'/>
+ <qualified-type-def type-id='b96825af' volatile='yes' id='84ff7d66'/>
+ <pointer-type-def type-id='84ff7d66' size-in-bits='64' id='aa323ea4'/>
+ <qualified-type-def type-id='ee1f298e' volatile='yes' id='6f7e09cb'/>
+ <pointer-type-def type-id='6f7e09cb' size-in-bits='64' id='64698d33'/>
+ <qualified-type-def type-id='48b5725f' volatile='yes' id='b0b3cbf9'/>
+ <pointer-type-def type-id='b0b3cbf9' size-in-bits='64' id='fe09dd29'/>
<function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
- <return type-id='type-id-5'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
- <return type-id='type-id-5'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
- <return type-id='type-id-5'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <qualified-type-def type-id='type-id-39' volatile='yes' id='type-id-92'/>
- <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-93'/>
<function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-62' name='value'/>
- <return type-id='type-id-22'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='3502e3ff' name='value'/>
+ <return type-id='95e97e5e'/>
</function-decl>
<function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-62' name='value'/>
- <return type-id='type-id-22'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='3502e3ff' name='value'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <qualified-type-def type-id='type-id-5' volatile='yes' id='type-id-94'/>
- <pointer-type-def type-id='type-id-94' size-in-bits='64' id='type-id-95'/>
<function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
- <parameter type-id='type-id-95' name='target'/>
- <parameter type-id='type-id-6' name='bits'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='eaa32e2f' name='bits'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-39' name='bits'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <qualified-type-def type-id='type-id-56' volatile='yes' id='type-id-96'/>
- <pointer-type-def type-id='type-id-96' size-in-bits='64' id='type-id-97'/>
<function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-56' name='bits'/>
- <return type-id='type-id-56'/>
- </function-decl>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-98'/>
- <typedef-decl name='__uint16_t' type-id='type-id-98' id='type-id-99'/>
- <typedef-decl name='uint16_t' type-id='type-id-99' id='type-id-100'/>
- <qualified-type-def type-id='type-id-100' volatile='yes' id='type-id-101'/>
- <pointer-type-def type-id='type-id-101' size-in-bits='64' id='type-id-102'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='8f92235e'/>
+ </function-decl>
<function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-100' name='bits'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
- <qualified-type-def type-id='type-id-12' volatile='yes' id='type-id-103'/>
- <pointer-type-def type-id='type-id-103' size-in-bits='64' id='type-id-104'/>
<function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-12' name='bits'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
- <parameter type-id='type-id-95' name='target'/>
- <parameter type-id='type-id-6' name='exp'/>
- <parameter type-id='type-id-6' name='des'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='eaa32e2f' name='exp'/>
+ <parameter type-id='eaa32e2f' name='des'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-39' name='bits'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-56' name='bits'/>
- <return type-id='type-id-56'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-100' name='bits'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-12' name='bits'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-39' name='bits'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-56' name='bits'/>
- <return type-id='type-id-56'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-100' name='bits'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-12' name='bits'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
- <typedef-decl name='__ssize_t' type-id='type-id-30' id='type-id-105'/>
- <typedef-decl name='ssize_t' type-id='type-id-105' id='type-id-106'/>
<function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
- <parameter type-id='type-id-95' name='target'/>
- <parameter type-id='type-id-106' name='bits'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-30' name='bits'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <typedef-decl name='__int32_t' type-id='type-id-22' id='type-id-107'/>
- <typedef-decl name='int32_t' type-id='type-id-107' id='type-id-108'/>
<function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-108' name='bits'/>
- <return type-id='type-id-56'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <typedef-decl name='__int16_t' type-id='type-id-32' id='type-id-109'/>
- <typedef-decl name='int16_t' type-id='type-id-109' id='type-id-110'/>
<function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-110' name='bits'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-47' name='bits'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
- <parameter type-id='type-id-95' name='target'/>
- <parameter type-id='type-id-106' name='bits'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-30' name='bits'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-108' name='bits'/>
- <return type-id='type-id-56'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-110' name='bits'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-47' name='bits'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
- <parameter type-id='type-id-93' name='target'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
- <parameter type-id='type-id-97' name='target'/>
- <return type-id='type-id-56'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
- <parameter type-id='type-id-102' name='target'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
- <parameter type-id='type-id-104' name='target'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
- <parameter type-id='type-id-93' name='target'/>
- <return type-id='type-id-39'/>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='ee1f298e'/>
</function-decl>
<function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
- <parameter type-id='type-id-97' name='target'/>
- <return type-id='type-id-56'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
- <parameter type-id='type-id-102' name='target'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
- <parameter type-id='type-id-104' name='target'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-39' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-56' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-100' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-12' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-39' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-56' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-100' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-12' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
- <parameter type-id='type-id-95' name='target'/>
- <parameter type-id='type-id-106' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-30' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-108' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-110' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-47' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
- <parameter type-id='type-id-95' name='target'/>
- <parameter type-id='type-id-106' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-30' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-108' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-110' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-47' name='bits'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
- <parameter type-id='type-id-93' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
- <parameter type-id='type-id-97' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
- <parameter type-id='type-id-102' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
- <parameter type-id='type-id-104' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
- <parameter type-id='type-id-93' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
- <parameter type-id='type-id-97' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
- <parameter type-id='type-id-102' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
- <parameter type-id='type-id-104' name='target'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
- <parameter type-id='type-id-104' name='target'/>
- <parameter type-id='type-id-12' name='exp'/>
- <parameter type-id='type-id-12' name='des'/>
- <return type-id='type-id-12'/>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='exp'/>
+ <parameter type-id='b96825af' name='des'/>
+ <return type-id='b96825af'/>
</function-decl>
<function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
- <parameter type-id='type-id-102' name='target'/>
- <parameter type-id='type-id-100' name='exp'/>
- <parameter type-id='type-id-100' name='des'/>
- <return type-id='type-id-100'/>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='exp'/>
+ <parameter type-id='149c6638' name='des'/>
+ <return type-id='149c6638'/>
</function-decl>
<function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
- <parameter type-id='type-id-97' name='target'/>
- <parameter type-id='type-id-56' name='exp'/>
- <parameter type-id='type-id-56' name='des'/>
- <return type-id='type-id-56'/>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='exp'/>
+ <parameter type-id='8f92235e' name='des'/>
+ <return type-id='8f92235e'/>
</function-decl>
<function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
- <parameter type-id='type-id-93' name='target'/>
- <parameter type-id='type-id-39' name='exp'/>
- <parameter type-id='type-id-39' name='des'/>
- <return type-id='type-id-39'/>
- </function-decl>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='exp'/>
+ <parameter type-id='ee1f298e' name='des'/>
+ <return type-id='ee1f298e'/>
+ </function-decl>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <type-decl name='short int' size-in-bits='16' id='a2185560'/>
+ <typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
+ <typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
+ <typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <type-decl name='signed char' size-in-bits='8' id='28577a57'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='getexecname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='getexecname.c' language='LANG_C99'>
<function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
- <return type-id='type-id-4'/>
- </function-decl>
- <function-decl name='getexecname_impl' mangled-name='getexecname_impl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <return type-id='80f4b756'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='list.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-111'>
+ <abi-instr version='1.0' address-size='64' path='list.c' language='LANG_C99'>
+ <typedef-decl name='list_t' type-id='e824dae9' id='0899125f'/>
+ <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='e824dae9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='list_size' type-id='type-id-8' visibility='default'/>
+ <var-decl name='list_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='list_offset' type-id='type-id-8' visibility='default'/>
+ <var-decl name='list_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='list_head' type-id='type-id-112' visibility='default'/>
+ <var-decl name='list_head' type-id='b0b5e45e' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-112'>
+ <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='b0b5e45e'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='next' type-id='type-id-113' visibility='default'/>
+ <var-decl name='next' type-id='b03eadb4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='prev' type-id='type-id-113' visibility='default'/>
+ <var-decl name='prev' type-id='b03eadb4' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-112' size-in-bits='64' id='type-id-113'/>
- <typedef-decl name='list_t' type-id='type-id-111' id='type-id-114'/>
- <pointer-type-def type-id='type-id-114' size-in-bits='64' id='type-id-115'/>
+ <typedef-decl name='list_node_t' type-id='b0b5e45e' id='b21843b2'/>
+ <pointer-type-def type-id='b0b5e45e' size-in-bits='64' id='b03eadb4'/>
+ <pointer-type-def type-id='b21843b2' size-in-bits='64' id='ccc38265'/>
+ <pointer-type-def type-id='0899125f' size-in-bits='64' id='352ec160'/>
<function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
- <parameter type-id='type-id-115' name='list'/>
- <return type-id='type-id-22'/>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <typedef-decl name='list_node_t' type-id='type-id-112' id='type-id-116'/>
- <pointer-type-def type-id='type-id-116' size-in-bits='64' id='type-id-117'/>
<function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
- <parameter type-id='type-id-117' name='ln'/>
- <return type-id='type-id-22'/>
+ <parameter type-id='ccc38265' name='ln'/>
+ <return type-id='95e97e5e'/>
</function-decl>
<function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
- <parameter type-id='type-id-117' name='ln'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='ccc38265' name='ln'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
- <parameter type-id='type-id-117' name='lold'/>
- <parameter type-id='type-id-117' name='lnew'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='ccc38265' name='lold'/>
+ <parameter type-id='ccc38265' name='lnew'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
- <parameter type-id='type-id-115' name='dst'/>
- <parameter type-id='type-id-115' name='src'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='dst'/>
+ <parameter type-id='352ec160' name='src'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-6' name='object'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-6' name='object'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
- <parameter type-id='type-id-115' name='list'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
- <parameter type-id='type-id-115' name='list'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
- <parameter type-id='type-id-115' name='list'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
- <parameter type-id='type-id-115' name='list'/>
- <return type-id='type-id-6'/>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
<function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-6' name='object'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-6' name='object'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-6' name='object'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-6' name='object'/>
- <parameter type-id='type-id-6' name='nobject'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <parameter type-id='eaa32e2f' name='nobject'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-6' name='object'/>
- <parameter type-id='type-id-6' name='nobject'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <parameter type-id='eaa32e2f' name='nobject'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
- <parameter type-id='type-id-115' name='list'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
- <parameter type-id='type-id-115' name='list'/>
- <parameter type-id='type-id-8' name='size'/>
- <parameter type-id='type-id-8' name='offset'/>
- <return type-id='type-id-5'/>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='b59d7dce' name='offset'/>
+ <return type-id='48b5725f'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='mkdirp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='__mode_t' type-id='type-id-31' id='type-id-118'/>
- <typedef-decl name='mode_t' type-id='type-id-118' id='type-id-119'/>
+ <abi-instr version='1.0' address-size='64' path='mkdirp.c' language='LANG_C99'>
+ <typedef-decl name='mode_t' type-id='e1c52942' id='d50d396c'/>
+ <typedef-decl name='__mode_t' type-id='f0981eeb' id='e1c52942'/>
<function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
- <parameter type-id='type-id-4' name='d'/>
- <parameter type-id='type-id-119' name='mode'/>
- <return type-id='type-id-22'/>
+ <parameter type-id='80f4b756' name='d'/>
+ <parameter type-id='d50d396c' name='mode'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='__mbstowcs_alias' mangled-name='mbstowcs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' language='LANG_C99'>
+ <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
+ <return type-id='7359adad'/>
</function-decl>
- <function-decl name='__wcstombs_alias' mangled-name='wcstombs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
+ <subrange length='3' type-id='7359adad' id='56f209d2'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
+ <subrange length='1' type-id='7359adad' id='52f813b4'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
+ <subrange length='20' type-id='7359adad' id='fdca39cf'/>
+ </array-type-def>
+ <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='0c544dc0'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='mnt_major' type-id='3502e3ff' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='0bbec9cd'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='st_ino' type-id='71288a47' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='st_size' type-id='79989e9c' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='st_blocks' type-id='4e711bf1' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__dev_t' type-id='7359adad' id='35ed8932'/>
+ <typedef-decl name='__ino64_t' type-id='7359adad' id='71288a47'/>
+ <typedef-decl name='__nlink_t' type-id='7359adad' id='80f0b9df'/>
+ <typedef-decl name='__uid_t' type-id='f0981eeb' id='cc5fcceb'/>
+ <typedef-decl name='__gid_t' type-id='f0981eeb' id='d94ec6d9'/>
+ <typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
+ <typedef-decl name='__blksize_t' type-id='bd54fe1a' id='d3f10a7f'/>
+ <typedef-decl name='__blkcnt64_t' type-id='bd54fe1a' id='4e711bf1'/>
+ <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='a9c79a1f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='tv_sec' type-id='65eda9c0' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='tv_nsec' type-id='03085adc' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='__time_t' type-id='bd54fe1a' id='65eda9c0'/>
+ <typedef-decl name='__syscall_slong_t' type-id='bd54fe1a' id='03085adc'/>
+ <typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='_chain' type-id='dca988a5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='928'>
+ <var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1024'>
+ <var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1040'>
+ <var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1048'>
+ <var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1088'>
+ <var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1152'>
+ <var-decl name='_offset' type-id='724e4de6' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1216'>
+ <var-decl name='__pad1' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1280'>
+ <var-decl name='__pad2' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1344'>
+ <var-decl name='__pad3' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1408'>
+ <var-decl name='__pad4' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1472'>
+ <var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1536'>
+ <var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1568'>
+ <var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='010ae0b9'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='_next' type-id='e4c6fa61' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='_sbuf' type-id='dca988a5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_pos' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
+ <typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
+ <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
+ <pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
+ <pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
+ <pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
+ <pointer-type-def type-id='0c544dc0' size-in-bits='64' id='394fc496'/>
+ <pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
+ <pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
+ <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='394fc496' name='entry'/>
+ <parameter type-id='62f7a03d' name='statbuf'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
+ <parameter type-id='822cd80b' name='fp'/>
+ <parameter type-id='9d424d31' name='mgetp'/>
+ <parameter type-id='9d424d31' name='mrefp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='mkdir' mangled-name='mkdir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
+ <parameter type-id='822cd80b' name='fp'/>
+ <parameter type-id='9d424d31' name='mgetp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='access' mangled-name='access' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' language='LANG_C99'>
+ <typedef-decl name='zoneid_t' type-id='95e97e5e' id='4da03624'/>
+ <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
+ <return type-id='4da03624'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='page.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='page.c' language='LANG_C99'>
<function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
- <return type-id='type-id-8'/>
- </function-decl>
- <function-decl name='sysconf' mangled-name='sysconf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcat.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='strlcat.c' language='LANG_C99'>
<function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
- <parameter type-id='type-id-2' name='dst'/>
- <parameter type-id='type-id-4' name='src'/>
- <parameter type-id='type-id-8' name='dstsize'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='26a90f95' name='dst'/>
+ <parameter type-id='80f4b756' name='src'/>
+ <parameter type-id='b59d7dce' name='dstsize'/>
+ <return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcpy.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='strlcpy.c' language='LANG_C99'>
<function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
- <parameter type-id='type-id-2' name='dst'/>
- <parameter type-id='type-id-4' name='src'/>
- <parameter type-id='type-id-8' name='len'/>
- <return type-id='type-id-8'/>
+ <parameter type-id='26a90f95' name='dst'/>
+ <parameter type-id='80f4b756' name='src'/>
+ <parameter type-id='b59d7dce' name='len'/>
+ <return type-id='b59d7dce'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='timestamp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
+ <abi-instr version='1.0' address-size='64' path='timestamp.c' language='LANG_C99'>
<function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
- <parameter type-id='type-id-62' name='timestamp_fmt'/>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__builtin_puts' mangled-name='puts' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='localtime' mangled-name='localtime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='strftime' mangled-name='strftime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='time' mangled-name='time' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='nl_langinfo' mangled-name='nl_langinfo' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/getexecname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='__readlink_alias' mangled-name='readlink' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <parameter type-id='3502e3ff' name='timestamp_fmt'/>
+ <return type-id='48b5725f'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='__open_alias' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='__read_alias' mangled-name='read' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
- </function-decl>
- <function-decl name='close' mangled-name='close' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <abi-instr version='1.0' address-size='64' path='uu_alloc.c' language='LANG_C99'>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <type-decl name='variadic parameter type' id='2c1145c5'/>
+ <type-decl name='void' id='48b5725f'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <function-decl name='uu_msprintf' mangled-name='uu_msprintf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_msprintf'>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='getenv' mangled-name='getenv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_memdup' mangled-name='uu_memdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_memdup'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='b59d7dce' name='sz'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='strtoull' mangled-name='strtoull' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_strndup' mangled-name='uu_strndup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strndup'>
+ <parameter type-id='80f4b756' name='s'/>
+ <parameter type-id='b59d7dce' name='n'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='fopen' mangled-name='fopen64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_strdup' mangled-name='uu_strdup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strdup'>
+ <parameter type-id='80f4b756' name='str'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='fscanf' mangled-name='fscanf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_free' mangled-name='uu_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_free'>
+ <parameter type-id='eaa32e2f' name='p'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fclose' mangled-name='fclose' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_zalloc' mangled-name='uu_zalloc' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_zalloc'>
+ <parameter type-id='b59d7dce' name='n'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-120'>
+ <abi-instr version='1.0' address-size='64' path='uu_avl.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='320' id='36c46961'>
+ <subrange length='40' type-id='7359adad' id='8f80b239'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='512' id='59daf3ef'>
+ <subrange length='64' type-id='7359adad' id='b10be967'/>
+ </array-type-def>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <type-decl name='short int' size-in-bits='16' id='a2185560'/>
+ <type-decl name='signed char' size-in-bits='8' id='28577a57'/>
+ <array-type-def dimensions='1' type-id='e475ab95' size-in-bits='192' id='0ce65a8b'>
+ <subrange length='3' type-id='7359adad' id='56f209d2'/>
+ </array-type-def>
+ <type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <class-decl name='uu_avl' size-in-bits='960' is-struct='yes' visibility='default' id='4af029d1'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='ua_next_enc' type-id='e475ab95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='ua_prev_enc' type-id='e475ab95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='ua_pool' type-id='de82c773' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='ua_parent_enc' type-id='e475ab95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='ua_debug' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='264'>
+ <var-decl name='ua_index' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='ua_tree' type-id='b351119f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='ua_null_walk' type-id='edd8457b' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
+ <class-decl name='uu_avl_pool' size-in-bits='2176' is-struct='yes' visibility='default' id='12a530a8'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='uap_next' type-id='de82c773' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='uap_prev' type-id='de82c773' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='uap_name' type-id='59daf3ef' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='uap_nodeoffset' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='uap_objsize' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='uap_cmp' type-id='d502b39f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='uap_debug' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='840'>
+ <var-decl name='uap_last_index' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='uap_lock' type-id='7a6844eb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1216'>
+ <var-decl name='uap_null_avl' type-id='bb7f0973' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uu_avl_pool_t' type-id='12a530a8' id='7f84e390'/>
+ <typedef-decl name='uu_compare_fn_t' type-id='add6e811' id='40f93560'/>
+ <typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
+ <typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
+ <typedef-decl name='pthread_mutex_t' type-id='c4794498' id='7a6844eb'/>
+ <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='c4794498'>
+ <data-member access='private'>
+ <var-decl name='__data' type-id='4c734837' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='__size' type-id='36c46961' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
+ </data-member>
+ </union-decl>
+ <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='4c734837'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-2' visibility='default'/>
+ <var-decl name='__lock' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='__count' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-2' visibility='default'/>
+ <var-decl name='__owner' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='__nusers' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-2' visibility='default'/>
+ <var-decl name='__kind' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='__spins' type-id='a2185560' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='176'>
+ <var-decl name='__elision' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-2' visibility='default'/>
+ <var-decl name='__list' type-id='518fb49c' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_major' type-id='type-id-62' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='__pthread_list_t' type-id='0e01899c' id='518fb49c'/>
+ <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='0e01899c'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='__prev' type-id='4d98cd5a' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_minor' type-id='type-id-62' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='__next' type-id='4d98cd5a' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-120' size-in-bits='64' id='type-id-121'/>
- <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='type-id-122'>
+ <typedef-decl name='uu_avl_t' type-id='4af029d1' id='bb7f0973'/>
+ <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='st_dev' type-id='type-id-123' visibility='default'/>
+ <var-decl name='avl_root' type-id='bf311473' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='st_ino' type-id='type-id-124' visibility='default'/>
+ <var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='st_nlink' type-id='type-id-125' visibility='default'/>
+ <var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='st_mode' type-id='type-id-118' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='st_uid' type-id='type-id-126' visibility='default'/>
+ <var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='st_gid' type-id='type-id-127' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='__pad0' type-id='type-id-22' visibility='default'/>
+ <var-decl name='avl_pad' type-id='b59d7dce' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='st_rdev' type-id='type-id-123' visibility='default'/>
+ </class-decl>
+ <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='st_size' type-id='type-id-128' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='st_blksize' type-id='type-id-129' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
+ <class-decl name='uu_avl_walk' size-in-bits='320' is-struct='yes' visibility='default' id='e70a39e3'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='uaw_next' type-id='5842d146' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='st_blocks' type-id='type-id-130' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='uaw_prev' type-id='5842d146' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='st_atim' type-id='type-id-131' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='uaw_avl' type-id='a5c21a38' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='st_mtim' type-id='type-id-131' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='uaw_next_result' type-id='eaa32e2f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='st_ctim' type-id='type-id-131' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='uaw_dir' type-id='ee31ee44' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='__glibc_reserved' type-id='type-id-132' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='264'>
+ <var-decl name='uaw_robust' type-id='b96825af' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__dev_t' type-id='type-id-7' id='type-id-123'/>
- <typedef-decl name='__ino64_t' type-id='type-id-7' id='type-id-124'/>
- <typedef-decl name='__nlink_t' type-id='type-id-7' id='type-id-125'/>
- <typedef-decl name='__uid_t' type-id='type-id-31' id='type-id-126'/>
- <typedef-decl name='__gid_t' type-id='type-id-31' id='type-id-127'/>
- <typedef-decl name='__off_t' type-id='type-id-30' id='type-id-128'/>
- <typedef-decl name='__blksize_t' type-id='type-id-30' id='type-id-129'/>
- <typedef-decl name='__blkcnt64_t' type-id='type-id-30' id='type-id-130'/>
- <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-131'>
+ <typedef-decl name='uu_avl_walk_t' type-id='e70a39e3' id='edd8457b'/>
+ <typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
+ <typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
+ <typedef-decl name='uu_avl_index_t' type-id='e475ab95' id='5d7f5fc8'/>
+ <typedef-decl name='uu_walk_fn_t' type-id='96ee24a5' id='9d1aa0dc'/>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='uu_avl_node_t' type-id='f65f4326' id='73a65116'/>
+ <class-decl name='uu_avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='f65f4326'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='type-id-133' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='type-id-134' visibility='default'/>
+ <var-decl name='uan_opaque' type-id='0ce65a8b' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__time_t' type-id='type-id-30' id='type-id-133'/>
- <typedef-decl name='__syscall_slong_t' type-id='type-id-30' id='type-id-134'/>
-
- <array-type-def dimensions='1' type-id='type-id-134' size-in-bits='192' id='type-id-132'>
- <subrange length='3' type-id='type-id-7' id='type-id-59'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-122' size-in-bits='64' id='type-id-135'/>
- <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
- <parameter type-id='type-id-4' name='path'/>
- <parameter type-id='type-id-121' name='entry'/>
- <parameter type-id='type-id-135' name='statbuf'/>
- <return type-id='type-id-22'/>
+ <pointer-type-def type-id='0e01899c' size-in-bits='64' id='4d98cd5a'/>
+ <pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
+ <pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
+ <pointer-type-def type-id='5d7f5fc8' size-in-bits='64' id='813a2225'/>
+ <pointer-type-def type-id='73a65116' size-in-bits='64' id='2dc35b9d'/>
+ <pointer-type-def type-id='7f84e390' size-in-bits='64' id='de82c773'/>
+ <pointer-type-def type-id='bb7f0973' size-in-bits='64' id='a5c21a38'/>
+ <pointer-type-def type-id='edd8457b' size-in-bits='64' id='5842d146'/>
+ <pointer-type-def type-id='40f93560' size-in-bits='64' id='d502b39f'/>
+ <pointer-type-def type-id='9d1aa0dc' size-in-bits='64' id='30a42b6d'/>
+ <pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
+ <function-decl name='uu_avl_release' mangled-name='uu_avl_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_release'>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_lockup' mangled-name='uu_avl_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_lockup'>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_nearest_prev' mangled-name='uu_avl_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_prev'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='5d7f5fc8' name='idx'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_nearest_next' mangled-name='uu_avl_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_nearest_next'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='5d7f5fc8' name='idx'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_insert' mangled-name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_insert'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <parameter type-id='5d7f5fc8' name='idx'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_find' mangled-name='uu_avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_find'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <parameter type-id='813a2225' name='out'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_teardown' mangled-name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_teardown'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='63e171df' name='cookie'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_remove' mangled-name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_remove'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk' mangled-name='uu_avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='30a42b6d' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_end' mangled-name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_end'>
+ <parameter type-id='5842d146' name='wp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_next' mangled-name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_next'>
+ <parameter type-id='5842d146' name='wp'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_walk_start' mangled-name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_walk_start'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='5842d146'/>
+ </function-decl>
+ <function-decl name='uu_avl_prev' mangled-name='uu_avl_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_prev'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='eaa32e2f' name='node'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_next' mangled-name='uu_avl_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_next'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <parameter type-id='eaa32e2f' name='node'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_last' mangled-name='uu_avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_last'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_first' mangled-name='uu_avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_first'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_avl_numnodes' mangled-name='uu_avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_numnodes'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <return type-id='b59d7dce'/>
+ </function-decl>
+ <function-decl name='uu_avl_destroy' mangled-name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_destroy'>
+ <parameter type-id='a5c21a38' name='ap'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_create' mangled-name='uu_avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_create'>
+ <parameter type-id='de82c773' name='pp'/>
+ <parameter type-id='eaa32e2f' name='parent'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='a5c21a38'/>
+ </function-decl>
+ <function-decl name='uu_avl_node_fini' mangled-name='uu_avl_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_fini'>
+ <parameter type-id='eaa32e2f' name='base'/>
+ <parameter type-id='2dc35b9d' name='np'/>
+ <parameter type-id='de82c773' name='pp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_node_init' mangled-name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_node_init'>
+ <parameter type-id='eaa32e2f' name='base'/>
+ <parameter type-id='2dc35b9d' name='np'/>
+ <parameter type-id='de82c773' name='pp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_pool_destroy' mangled-name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_destroy'>
+ <parameter type-id='de82c773' name='pp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_avl_pool_create' mangled-name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_avl_pool_create'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='b59d7dce' name='objsize'/>
+ <parameter type-id='b59d7dce' name='nodeoffset'/>
+ <parameter type-id='d502b39f' name='compare_func'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='de82c773'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='96ee24a5'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='add6e811'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='uu_ident.c' language='LANG_C99'>
+ <typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
+ <function-decl name='uu_check_name' mangled-name='uu_check_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_check_name'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='3502e3ff' name='flags'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-136'>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='uu_list.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='e475ab95' size-in-bits='128' id='d0e9cdae'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <class-decl name='uu_list' size-in-bits='896' is-struct='yes' visibility='default' id='1d04bdf0'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-22' visibility='default'/>
+ <var-decl name='ul_next_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ul_prev_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ul_pool' type-id='0941e04e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ul_parent_enc' type-id='e475ab95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ul_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ul_numnodes' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ul_debug' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-2' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='392'>
+ <var-decl name='ul_sorted' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-2' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='400'>
+ <var-decl name='ul_index' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='ul_null_node' type-id='8e5864b0' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ul_null_walk' type-id='9fed32d2' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='uu_list_pool' size-in-bits='2112' is-struct='yes' visibility='default' id='55168cab'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='ulp_next' type-id='0941e04e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='ulp_prev' type-id='0941e04e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='ulp_name' type-id='59daf3ef' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ulp_nodeoffset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ulp_objsize' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-137' visibility='default'/>
+ <var-decl name='ulp_cmp' type-id='d502b39f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-138' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-128' visibility='default'/>
+ <var-decl name='ulp_debug' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-98' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-48' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-139' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='840'>
+ <var-decl name='ulp_last_index' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-140' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='ulp_lock' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__pad1' type-id='type-id-6' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__pad2' type-id='type-id-6' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='__pad3' type-id='type-id-6' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='__pad4' type-id='type-id-6' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-22' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-141' visibility='default'/>
+ <var-decl name='ulp_null_list' type-id='82e88484' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-142'>
+ <typedef-decl name='uu_list_pool_t' type-id='55168cab' id='38a2549d'/>
+ <typedef-decl name='uu_list_t' type-id='1d04bdf0' id='82e88484'/>
+ <typedef-decl name='uu_list_node_impl_t' type-id='700a795c' id='8e5864b0'/>
+ <class-decl name='uu_list_node_impl' size-in-bits='128' is-struct='yes' visibility='default' id='700a795c'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_next' type-id='type-id-137' visibility='default'/>
+ <var-decl name='uln_next' type-id='5af1298a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_sbuf' type-id='type-id-138' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_pos' type-id='type-id-22' visibility='default'/>
+ <var-decl name='uln_prev' type-id='5af1298a' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-142' size-in-bits='64' id='type-id-137'/>
- <pointer-type-def type-id='type-id-136' size-in-bits='64' id='type-id-138'/>
-
- <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='8' id='type-id-139'>
- <subrange length='1' type-id='type-id-7' id='type-id-143'/>
-
- </array-type-def>
- <typedef-decl name='__off64_t' type-id='type-id-30' id='type-id-140'/>
-
- <array-type-def dimensions='1' type-id='type-id-1' size-in-bits='160' id='type-id-141'>
- <subrange length='20' type-id='type-id-7' id='type-id-144'/>
-
- </array-type-def>
- <typedef-decl name='FILE' type-id='type-id-136' id='type-id-145'/>
- <pointer-type-def type-id='type-id-145' size-in-bits='64' id='type-id-146'/>
- <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-147'>
+ <class-decl name='uu_list_walk' size-in-bits='320' is-struct='yes' visibility='default' id='b80e3208'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ulw_next' type-id='4d848103' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ulw_prev' type-id='4d848103' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ulw_list' type-id='0c0b229b' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-2' visibility='default'/>
+ <var-decl name='ulw_dir' type-id='ee31ee44' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='200'>
+ <var-decl name='ulw_robust' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='ulw_next_result' type-id='a085247f' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-148'/>
- <function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
- <parameter type-id='type-id-146' name='fp'/>
- <parameter type-id='type-id-148' name='mgetp'/>
- <parameter type-id='type-id-148' name='mrefp'/>
- <return type-id='type-id-22'/>
+ <typedef-decl name='uu_list_walk_t' type-id='b80e3208' id='9fed32d2'/>
+ <typedef-decl name='uu_list_index_t' type-id='e475ab95' id='f0dd35ff'/>
+ <typedef-decl name='uu_list_node_t' type-id='f8f3cec5' id='c4dc472f'/>
+ <class-decl name='uu_list_node' size-in-bits='128' is-struct='yes' visibility='default' id='f8f3cec5'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='uln_opaque' type-id='d0e9cdae' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='f0dd35ff' size-in-bits='64' id='ecbc0046'/>
+ <pointer-type-def type-id='700a795c' size-in-bits='64' id='5af1298a'/>
+ <pointer-type-def type-id='8e5864b0' size-in-bits='64' id='a085247f'/>
+ <pointer-type-def type-id='c4dc472f' size-in-bits='64' id='dbe143f4'/>
+ <pointer-type-def type-id='38a2549d' size-in-bits='64' id='0941e04e'/>
+ <pointer-type-def type-id='82e88484' size-in-bits='64' id='0c0b229b'/>
+ <pointer-type-def type-id='9fed32d2' size-in-bits='64' id='4d848103'/>
+ <function-decl name='uu_list_release' mangled-name='uu_list_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_release'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
- <parameter type-id='type-id-146' name='fp'/>
- <parameter type-id='type-id-148' name='mgetp'/>
- <return type-id='type-id-22'/>
+ <function-decl name='uu_list_lockup' mangled-name='uu_list_lockup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_lockup'>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_list_prev' mangled-name='uu_list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_prev'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_next' mangled-name='uu_list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_next'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_last' mangled-name='uu_list_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_last'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_first' mangled-name='uu_list_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_first'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_numnodes' mangled-name='uu_list_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_numnodes'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <return type-id='b59d7dce'/>
+ </function-decl>
+ <function-decl name='uu_list_insert_after' mangled-name='uu_list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_after'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='eaa32e2f' name='target'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='uu_list_insert_before' mangled-name='uu_list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert_before'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='eaa32e2f' name='target'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='uu_list_teardown' mangled-name='uu_list_teardown' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_teardown'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='63e171df' name='cookie'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_remove' mangled-name='uu_list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_remove'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_list_walk' mangled-name='uu_list_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='30a42b6d' name='func'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='uu_list_walk_end' mangled-name='uu_list_walk_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_end'>
+ <parameter type-id='4d848103' name='wp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_list_walk_next' mangled-name='uu_list_walk_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_next'>
+ <parameter type-id='4d848103' name='wp'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_walk_start' mangled-name='uu_list_walk_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_walk_start'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='4d848103'/>
+ </function-decl>
+ <function-decl name='uu_list_nearest_prev' mangled-name='uu_list_nearest_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_prev'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='f0dd35ff' name='idx'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_nearest_next' mangled-name='uu_list_nearest_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_nearest_next'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='f0dd35ff' name='idx'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_find' mangled-name='uu_list_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_find'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <parameter type-id='eaa32e2f' name='private'/>
+ <parameter type-id='ecbc0046' name='out'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-decl name='uu_list_insert' mangled-name='uu_list_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_insert'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <parameter type-id='eaa32e2f' name='elem'/>
+ <parameter type-id='f0dd35ff' name='idx'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_list_destroy' mangled-name='uu_list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_destroy'>
+ <parameter type-id='0c0b229b' name='lp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_list_create' mangled-name='uu_list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_create'>
+ <parameter type-id='0941e04e' name='pp'/>
+ <parameter type-id='eaa32e2f' name='parent'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='0c0b229b'/>
</function-decl>
- <function-decl name='__xstat64' mangled-name='__xstat64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_list_node_fini' mangled-name='uu_list_node_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_fini'>
+ <parameter type-id='eaa32e2f' name='base'/>
+ <parameter type-id='dbe143f4' name='np_arg'/>
+ <parameter type-id='0941e04e' name='pp'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='__builtin_fwrite' mangled-name='fwrite' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_list_node_init' mangled-name='uu_list_node_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_node_init'>
+ <parameter type-id='eaa32e2f' name='base'/>
+ <parameter type-id='dbe143f4' name='np_arg'/>
+ <parameter type-id='0941e04e' name='pp'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='feof' mangled-name='feof' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_list_pool_destroy' mangled-name='uu_list_pool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_destroy'>
+ <parameter type-id='0941e04e' name='pp'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='getmntent_r' mangled-name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_list_pool_create' mangled-name='uu_list_pool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_list_pool_create'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='b59d7dce' name='objsize'/>
+ <parameter type-id='b59d7dce' name='nodeoffset'/>
+ <parameter type-id='d502b39f' name='compare_func'/>
+ <parameter type-id='8f92235e' name='flags'/>
+ <return type-id='0941e04e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='zoneid_t' type-id='type-id-22' id='type-id-149'/>
- <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
- <return type-id='type-id-149'/>
+ <abi-instr version='1.0' address-size='64' path='uu_misc.c' language='LANG_C99'>
+ <function-decl name='uu_panic' mangled-name='uu_panic' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_panic'>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_strerror' mangled-name='uu_strerror' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strerror'>
+ <parameter type-id='8f92235e' name='code'/>
+ <return type-id='80f4b756'/>
+ </function-decl>
+ <function-decl name='uu_error' mangled-name='uu_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_error'>
+ <return type-id='8f92235e'/>
+ </function-decl>
+ <function-decl name='uu_set_error' mangled-name='uu_set_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_set_error'>
+ <parameter type-id='3502e3ff' name='code'/>
+ <return type-id='48b5725f'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <var-decl name='libspl_assert_ok' type-id='type-id-22' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
- <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
- <parameter type-id='type-id-4' name='file'/>
- <parameter type-id='type-id-4' name='func'/>
- <parameter type-id='type-id-22' name='line'/>
- <parameter type-id='type-id-4' name='format'/>
+ <abi-instr version='1.0' address-size='64' path='uu_pname.c' language='LANG_C99'>
+ <class-decl name='__va_list_tag' size-in-bits='192' is-struct='yes' visibility='default' id='d5027220'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='gp_offset' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='fp_offset' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='overflow_arg_area' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='reg_save_area' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='d5027220' size-in-bits='64' id='b7f2d5e6'/>
+ <pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
+ <var-decl name='uu_exit_ok_value' type-id='95e97e5e' mangled-name='uu_exit_ok_value' visibility='default' elf-symbol-id='uu_exit_ok_value'/>
+ <var-decl name='uu_exit_fatal_value' type-id='95e97e5e' mangled-name='uu_exit_fatal_value' visibility='default' elf-symbol-id='uu_exit_fatal_value'/>
+ <var-decl name='uu_exit_usage_value' type-id='95e97e5e' mangled-name='uu_exit_usage_value' visibility='default' elf-symbol-id='uu_exit_usage_value'/>
+ <function-decl name='uu_getpname' mangled-name='uu_getpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_getpname'>
+ <return type-id='80f4b756'/>
+ </function-decl>
+ <function-decl name='uu_setpname' mangled-name='uu_setpname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_setpname'>
+ <parameter type-id='26a90f95' name='arg0'/>
+ <return type-id='80f4b756'/>
+ </function-decl>
+ <function-decl name='uu_xdie' mangled-name='uu_xdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_xdie'>
+ <parameter type-id='95e97e5e' name='status'/>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_vxdie' mangled-name='uu_vxdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vxdie'>
+ <parameter type-id='95e97e5e' name='status'/>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter type-id='b7f2d5e6' name='alist'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_die' mangled-name='uu_die' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_die'>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_vdie' mangled-name='uu_vdie' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vdie'>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter type-id='b7f2d5e6' name='alist'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_warn' mangled-name='uu_warn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_warn'>
+ <parameter type-id='80f4b756' name='format'/>
<parameter is-variadic='yes'/>
- <return type-id='type-id-5'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_vwarn' mangled-name='uu_vwarn' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_vwarn'>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter type-id='b7f2d5e6' name='alist'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_alt_exit' mangled-name='uu_alt_exit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_alt_exit'>
+ <parameter type-id='95e97e5e' name='profile'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='uu_exit_usage' mangled-name='uu_exit_usage' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_usage'>
+ <return type-id='7292109c'/>
+ </function-decl>
+ <function-decl name='uu_exit_fatal' mangled-name='uu_exit_fatal' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_fatal'>
+ <return type-id='7292109c'/>
</function-decl>
- <function-decl name='__builtin_fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <function-decl name='uu_exit_ok' mangled-name='uu_exit_ok' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_exit_ok'>
+ <return type-id='7292109c'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='pthread_atfork.c' comp-dir-path='/build/glibc-S9d2JN/glibc-2.27/nptl' language='LANG_C99'>
- <function-decl name='__register_atfork' mangled-name='__register_atfork' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-5'/>
+ <abi-instr version='1.0' address-size='64' path='uu_string.c' language='LANG_C99'>
+ <type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
+ <typedef-decl name='boolean_t' type-id='08f5ca17' id='c19b74c3'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
+ <function-decl name='uu_strbw' mangled-name='uu_strbw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strbw'>
+ <parameter type-id='80f4b756' name='a'/>
+ <parameter type-id='80f4b756' name='b'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='uu_strcaseeq' mangled-name='uu_strcaseeq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_strcaseeq'>
+ <parameter type-id='80f4b756' name='a'/>
+ <parameter type-id='80f4b756' name='b'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='uu_streq' mangled-name='uu_streq' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='uu_streq'>
+ <parameter type-id='80f4b756' name='a'/>
+ <parameter type-id='80f4b756' name='b'/>
+ <return type-id='c19b74c3'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs.abi b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
index bb4bde2473bb..a2c79c8568ca 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs.abi
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs.abi
@@ -1,5415 +1,4175 @@
<abi-corpus architecture='elf-amd-x86_64' soname='libzfs.so.4'>
<elf-needed>
<dependency name='libzfs_core.so.3'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libuutil.so.3'/>
<dependency name='libm.so.6'/>
<dependency name='libcrypto.so.1.1'/>
<dependency name='libz.so.1'/>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='bookmark_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='cityhash4' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_end' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='color_start' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='dataset_nestcheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='entity_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_2_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_impl_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_byteswap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_incremental_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_native_varsize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_dataset_depth' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getprop_uint64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_add_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_envvar_is_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_errno' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_action' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_description' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_error_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_free_str_array' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_cache' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_mnttab_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_print_on_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_run_process_get_stdout_nopath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mountpoint_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='permset_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='pool_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='printf_color' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_disable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_enable_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_errorstr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='sa_validate_shareopts' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='snapshot_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_depends_on' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_is_valid_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_lookup_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_adjust_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_allocatable_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_bookmark_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_clone' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_all_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_nfs_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_commit_smb_shares' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_component_namecheck' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_create_ancestors' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_attempt_load_keys' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_clone_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_get_encryption_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_rewrap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_crypto_unload_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dataset_name_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_canonicalize_perm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_verify_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_whokey' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_destroy_snaps_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_foreach_mountpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_all_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_clones_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_pool_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_recvd_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_user_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_handle_dup' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_hold_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_ioctl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_is_shared_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_children' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_dependents' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_filesystems' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_mounted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_root' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapshots_sorted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_iter_snapspec' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mod_supported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_at' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_mount_delegation_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_name_valid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicestrtonum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parent_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_parse_mount_options' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_path_to_zhandle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_delegatable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_encryption_key_param' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_recvd' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_userquota_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_get_written_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inherit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_inheritable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_is_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_set_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_user' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_userquota' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_valid_keylocation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_visible' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prop_written' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_prune_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_refresh_properties' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_rollback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_save_arguments' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_progress' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_resume_token_to_nvlist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_send_saved' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_set_fsacl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_share_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_shareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_show_diffs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_purge' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_smb_acl_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_snapshot_nvl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_spa_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_special_devs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_standard_error' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_type_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmount' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unmountall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshare_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_bypath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_bytype' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_nfs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_unshareall_smb' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userspace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_valid_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_kernel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_version_userland' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_zpl_version_map' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_clear_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_close' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_disable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zpool_disable_datasets_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
+ <elf-symbol name='zpool_disable_volume_os' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_discard_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_enable_datasets' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_events_seek' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_expand_proplist' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_explain_recover' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_export_force' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_feature_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_vdev_by_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_free_handles' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_errlog' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_features' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_handle' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_load_policy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_physpath' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_prop_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_state_str' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_get_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_import_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_in_use' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_initialize_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_is_draid_spare' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_load_compat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_log_history' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_obj_to_path_ds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_open_canfail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_pool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_print_unsup_feat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_align_right' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_column_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_numeric' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_default_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_feature' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_table' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_get_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_readonly' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_setonce' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_unsupported' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_prop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_props_refresh' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_refresh_stats' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reguid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_reopen_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_scan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_set_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_skip_pool' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_state_to_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_sync_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_trim' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_upgrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_attach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_clear' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_degrade' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_detach' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_fault' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_indirect_size' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_name' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_offline' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_online' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_path_to_guid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_remove_cancel' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_vdev_split' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_wait_status' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_free_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_get_list' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_index_to_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_iter_common' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_name_to_prop' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_print_one_property' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_random_value' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_hidden' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_impl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_number' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_register_string' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_string_to_index' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_valid_for_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_values' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zprop_width' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zvol_volsize_to_reservation' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='fletcher_4_abd_ops' size='24' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512bw_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_avx512f_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_sse2_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_ssse3_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar4_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='fletcher_4_superscalar_ops' size='64' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_config_ops' size='16' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spa_feature_table' size='1904' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfeature_checks_disable' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_deleg_perm_tab' size='512' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_history_event_names' size='328' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_max_dataset_nesting' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_userquota_prop_prefixes' size='96' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='libzfs_changelist.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <type-decl name='void' id='type-id-1'/>
- <function-decl name='zfs_alloc' mangled-name='zfs_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_pool_create' mangled-name='uu_avl_pool_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_create' mangled-name='uu_avl_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_node_init' mangled-name='uu_avl_node_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_find' mangled-name='uu_avl_find' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_insert' mangled-name='uu_avl_insert' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_error' mangled-name='zfs_error' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_walk_start' mangled-name='uu_avl_walk_start' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_remove' mangled-name='uu_avl_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_walk_next' mangled-name='uu_avl_walk_next' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_walk_end' mangled-name='uu_avl_walk_end' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_destroy' mangled-name='uu_avl_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_pool_destroy' mangled-name='uu_avl_pool_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_unshare_proto' mangled-name='zfs_unshare_proto' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_commit_proto' mangled-name='zfs_commit_proto' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='remove_mountpoint' mangled-name='remove_mountpoint' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_share_nfs' mangled-name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_unshare_smb' mangled-name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_unshare_nfs' mangled-name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_share_smb' mangled-name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <abi-instr version='1.0' address-size='64' path='libshare.c' language='LANG_C99'>
+ <function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_validate_shareopts'>
+ <parameter type-id='26a90f95' name='options'/>
+ <parameter type-id='26a90f95' name='proto'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='uu_avl_last' mangled-name='uu_avl_last' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_errorstr'>
+ <parameter type-id='95e97e5e' name='err'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='zfs_commit_smb_shares' mangled-name='zfs_commit_smb_shares' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_commit_shares'>
+ <parameter type-id='80f4b756' name='protocol'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_commit_nfs_shares' mangled-name='zfs_commit_nfs_shares' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_is_shared'>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <parameter type-id='26a90f95' name='protocol'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_disable_share'>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <parameter type-id='26a90f95' name='protocol'/>
+ <return type-id='95e97e5e'/>
</function-decl>
+ <function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_enable_share'>
+ <parameter type-id='80f4b756' name='zfsname'/>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <parameter type-id='80f4b756' name='shareopts'/>
+ <parameter type-id='26a90f95' name='protocol'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <typedef-decl name='boolean_t' type-id='08f5ca17' id='c19b74c3'/>
+ <type-decl name='void' id='48b5725f'/>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_config.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <type-decl name='int' size-in-bits='32' id='type-id-2'/>
- <class-decl name='libzfs_handle' size-in-bits='18240' is-struct='yes' visibility='default' id='type-id-3'>
+ <abi-instr version='1.0' address-size='64' path='os/linux/smb.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='2040' id='11641789'>
+ <subrange length='255' type-id='7359adad' id='36e7f891'/>
+ </array-type-def>
+ <typedef-decl name='smb_share_t' type-id='a75bc907' id='2d05afd9'/>
+ <class-decl name='smb_share_s' size-in-bits='36992' is-struct='yes' visibility='default' id='a75bc907'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='libzfs_error' type-id='type-id-2' visibility='default'/>
+ <var-decl name='name' type-id='11641789' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='libzfs_fd' type-id='type-id-2' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2040'>
+ <var-decl name='path' type-id='d16c6df4' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='34808'>
+ <var-decl name='comment' type-id='11641789' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='36864'>
+ <var-decl name='guest_ok' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='36928'>
+ <var-decl name='next' type-id='05ed1c5f' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='a75bc907' size-in-bits='64' id='05ed1c5f'/>
+ <pointer-type-def type-id='2d05afd9' size-in-bits='64' id='a3e5c654'/>
+ <var-decl name='smb_shares' type-id='a3e5c654' visibility='default'/>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32768' id='d16c6df4'>
+ <subrange length='4096' type-id='7359adad' id='bc1b5ddc'/>
+ </array-type-def>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/cityhash.c' language='LANG_C99'>
+ <function-decl name='cityhash4' mangled-name='cityhash4' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='cityhash4'>
+ <parameter type-id='9c313c2d' name='w1'/>
+ <parameter type-id='9c313c2d' name='w2'/>
+ <parameter type-id='9c313c2d' name='w3'/>
+ <parameter type-id='9c313c2d' name='w4'/>
+ <return type-id='9c313c2d'/>
+ </function-decl>
+ <typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
+ <typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfeature_common.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='83f29ca2' size-in-bits='15232' id='d96379d0'>
+ <subrange length='34' type-id='7359adad' id='6a6a7e00'/>
+ </array-type-def>
+ <typedef-decl name='zfeature_info_t' type-id='1178d146' id='83f29ca2'/>
+ <class-decl name='zfeature_info' size-in-bits='448' is-struct='yes' visibility='default' id='1178d146'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='fi_feature' type-id='d6618c78' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='libzfs_pool_handles' type-id='type-id-4' visibility='default'/>
+ <var-decl name='fi_uname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='libzfs_ns_avlpool' type-id='type-id-5' visibility='default'/>
+ <var-decl name='fi_guid' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='libzfs_ns_avl' type-id='type-id-6' visibility='default'/>
+ <var-decl name='fi_desc' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='libzfs_ns_gen' type-id='type-id-7' visibility='default'/>
+ <var-decl name='fi_flags' type-id='fc329033' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='fi_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='libzfs_desc_active' type-id='type-id-2' visibility='default'/>
+ <var-decl name='fi_type' type-id='732d2bb2' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='libzfs_action' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='fi_depends' type-id='1acff326' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='8544'>
- <var-decl name='libzfs_desc' type-id='type-id-8' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='spa_feature_t' type-id='33ecb627' id='d6618c78'/>
+ <enum-decl name='spa_feature' id='33ecb627'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='SPA_FEATURE_NONE' value='-1'/>
+ <enumerator name='SPA_FEATURE_ASYNC_DESTROY' value='0'/>
+ <enumerator name='SPA_FEATURE_EMPTY_BPOBJ' value='1'/>
+ <enumerator name='SPA_FEATURE_LZ4_COMPRESS' value='2'/>
+ <enumerator name='SPA_FEATURE_MULTI_VDEV_CRASH_DUMP' value='3'/>
+ <enumerator name='SPA_FEATURE_SPACEMAP_HISTOGRAM' value='4'/>
+ <enumerator name='SPA_FEATURE_ENABLED_TXG' value='5'/>
+ <enumerator name='SPA_FEATURE_HOLE_BIRTH' value='6'/>
+ <enumerator name='SPA_FEATURE_EXTENSIBLE_DATASET' value='7'/>
+ <enumerator name='SPA_FEATURE_EMBEDDED_DATA' value='8'/>
+ <enumerator name='SPA_FEATURE_BOOKMARKS' value='9'/>
+ <enumerator name='SPA_FEATURE_FS_SS_LIMIT' value='10'/>
+ <enumerator name='SPA_FEATURE_LARGE_BLOCKS' value='11'/>
+ <enumerator name='SPA_FEATURE_LARGE_DNODE' value='12'/>
+ <enumerator name='SPA_FEATURE_SHA512' value='13'/>
+ <enumerator name='SPA_FEATURE_SKEIN' value='14'/>
+ <enumerator name='SPA_FEATURE_EDONR' value='15'/>
+ <enumerator name='SPA_FEATURE_USEROBJ_ACCOUNTING' value='16'/>
+ <enumerator name='SPA_FEATURE_ENCRYPTION' value='17'/>
+ <enumerator name='SPA_FEATURE_PROJECT_QUOTA' value='18'/>
+ <enumerator name='SPA_FEATURE_DEVICE_REMOVAL' value='19'/>
+ <enumerator name='SPA_FEATURE_OBSOLETE_COUNTS' value='20'/>
+ <enumerator name='SPA_FEATURE_POOL_CHECKPOINT' value='21'/>
+ <enumerator name='SPA_FEATURE_SPACEMAP_V2' value='22'/>
+ <enumerator name='SPA_FEATURE_ALLOCATION_CLASSES' value='23'/>
+ <enumerator name='SPA_FEATURE_RESILVER_DEFER' value='24'/>
+ <enumerator name='SPA_FEATURE_BOOKMARK_V2' value='25'/>
+ <enumerator name='SPA_FEATURE_REDACTION_BOOKMARKS' value='26'/>
+ <enumerator name='SPA_FEATURE_REDACTED_DATASETS' value='27'/>
+ <enumerator name='SPA_FEATURE_BOOKMARK_WRITTEN' value='28'/>
+ <enumerator name='SPA_FEATURE_LOG_SPACEMAP' value='29'/>
+ <enumerator name='SPA_FEATURE_LIVELIST' value='30'/>
+ <enumerator name='SPA_FEATURE_DEVICE_REBUILD' value='31'/>
+ <enumerator name='SPA_FEATURE_ZSTD_COMPRESS' value='32'/>
+ <enumerator name='SPA_FEATURE_DRAID' value='33'/>
+ <enumerator name='SPA_FEATURES' value='34'/>
+ </enum-decl>
+ <typedef-decl name='zfeature_flags_t' type-id='6db816a4' id='fc329033'/>
+ <enum-decl name='zfeature_flags' id='6db816a4'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFEATURE_FLAG_READONLY_COMPAT' value='1'/>
+ <enumerator name='ZFEATURE_FLAG_MOS' value='2'/>
+ <enumerator name='ZFEATURE_FLAG_ACTIVATE_ON_ENABLE' value='4'/>
+ <enumerator name='ZFEATURE_FLAG_PER_DATASET' value='8'/>
+ </enum-decl>
+ <typedef-decl name='zfeature_type_t' type-id='c4fa2355' id='732d2bb2'/>
+ <enum-decl name='zfeature_type' id='c4fa2355'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFEATURE_TYPE_BOOLEAN' value='0'/>
+ <enumerator name='ZFEATURE_TYPE_UINT64_ARRAY' value='1'/>
+ <enumerator name='ZFEATURE_NUM_TYPES' value='2'/>
+ </enum-decl>
+ <qualified-type-def type-id='d6618c78' const='yes' id='81a65028'/>
+ <pointer-type-def type-id='81a65028' size-in-bits='64' id='1acff326'/>
+ <pointer-type-def type-id='d6618c78' size-in-bits='64' id='a8425263'/>
+ <var-decl name='zfeature_checks_disable' type-id='c19b74c3' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
+ <var-decl name='spa_feature_table' type-id='d96379d0' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
+ <function-decl name='zpool_feature_init' mangled-name='zpool_feature_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_feature_init'>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfeature_depends_on' mangled-name='zfeature_depends_on' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_depends_on'>
+ <parameter type-id='d6618c78' name='fid'/>
+ <parameter type-id='d6618c78' name='check'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_name'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='a8425263' name='res'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_guid'>
+ <parameter type-id='80f4b756' name='guid'/>
+ <parameter type-id='a8425263' name='res'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_supported'>
+ <parameter type-id='80f4b756' name='guid'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='zfeature_is_valid_guid' mangled-name='zfeature_is_valid_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_valid_guid'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mod_supported'>
+ <parameter type-id='80f4b756' name='scope'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_comutil.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='80f4b756' size-in-bits='2624' id='ef31fedf'>
+ <subrange length='41' type-id='7359adad' id='cb834f44'/>
+ </array-type-def>
+ <typedef-decl name='zpool_load_policy_t' type-id='2f65b36f' id='d11b7617'/>
+ <class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='2f65b36f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zlp_rewind' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='16736'>
- <var-decl name='libzfs_printerr' type-id='type-id-2' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='zlp_maxmeta' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='16768'>
- <var-decl name='libzfs_mnttab_enable' type-id='type-id-9' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zlp_maxdata' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='16832'>
- <var-decl name='libzfs_mnttab_cache_lock' type-id='type-id-10' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='zlp_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='17152'>
- <var-decl name='libzfs_mnttab_cache' type-id='type-id-11' visibility='default'/>
+ </class-decl>
+ <pointer-type-def type-id='d11b7617' size-in-bits='64' id='23432aaa'/>
+ <var-decl name='zfs_history_event_names' type-id='ef31fedf' mangled-name='zfs_history_event_names' visibility='default' elf-symbol-id='zfs_history_event_names'/>
+ <function-decl name='zfs_dataset_name_hidden' mangled-name='zfs_dataset_name_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_name_hidden'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='zfs_spa_version_map' mangled-name='zfs_spa_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version_map'>
+ <parameter type-id='95e97e5e' name='zpl_version'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_zpl_version_map' mangled-name='zfs_zpl_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_zpl_version_map'>
+ <parameter type-id='95e97e5e' name='spa_version'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_load_policy'>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='23432aaa' name='zlpp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_special_devs' mangled-name='zfs_special_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_special_devs'>
+ <parameter type-id='5ce45b60' name='nv'/>
+ <parameter type-id='26a90f95' name='type'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='zfs_allocatable_devs' mangled-name='zfs_allocatable_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_allocatable_devs'>
+ <parameter type-id='5ce45b60' name='nv'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='17472'>
- <var-decl name='libzfs_pool_iter' type-id='type-id-2' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='17504'>
- <var-decl name='libzfs_prop_debug' type-id='type-id-9' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='17536'>
- <var-decl name='libzfs_urire' type-id='type-id-12' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='18048'>
- <var-decl name='libzfs_max_nvlist' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='18112'>
- <var-decl name='libfetch' type-id='type-id-13' visibility='default'/>
+ </class-decl>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
+ <typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_deleg.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='f3f851ad' size-in-bits='infinite' id='bc4e5d90'>
+ <subrange length='infinite' id='031f2035'/>
+ </array-type-def>
+ <typedef-decl name='zfs_deleg_perm_tab_t' type-id='5aa05c1f' id='f3f851ad'/>
+ <class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='5aa05c1f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='z_perm' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='18176'>
- <var-decl name='libfetch_load_error' type-id='type-id-14' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='z_note' type-id='4613c173' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='type-id-15'>
+ <typedef-decl name='zfs_deleg_note_t' type-id='08f5ca18' id='4613c173'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca18'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_DELEG_NOTE_CREATE' value='0'/>
+ <enumerator name='ZFS_DELEG_NOTE_DESTROY' value='1'/>
+ <enumerator name='ZFS_DELEG_NOTE_SNAPSHOT' value='2'/>
+ <enumerator name='ZFS_DELEG_NOTE_ROLLBACK' value='3'/>
+ <enumerator name='ZFS_DELEG_NOTE_CLONE' value='4'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
+ <enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
+ <enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
+ <enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
+ <enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
+ <enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
+ <enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
+ <enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
+ <enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
+ <enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
+ <enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
+ <enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
+ <enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
+ <enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
+ <enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
+ <enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
+ <enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
+ <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
+ <enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
+ </enum-decl>
+ <typedef-decl name='zfs_deleg_who_type_t' type-id='40ed39d2' id='36d4bd5a'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d2'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_DELEG_WHO_UNKNOWN' value='0'/>
+ <enumerator name='ZFS_DELEG_USER' value='117'/>
+ <enumerator name='ZFS_DELEG_USER_SETS' value='85'/>
+ <enumerator name='ZFS_DELEG_GROUP' value='103'/>
+ <enumerator name='ZFS_DELEG_GROUP_SETS' value='71'/>
+ <enumerator name='ZFS_DELEG_EVERYONE' value='101'/>
+ <enumerator name='ZFS_DELEG_EVERYONE_SETS' value='69'/>
+ <enumerator name='ZFS_DELEG_CREATE' value='99'/>
+ <enumerator name='ZFS_DELEG_CREATE_SETS' value='67'/>
+ <enumerator name='ZFS_DELEG_NAMED_SET' value='115'/>
+ <enumerator name='ZFS_DELEG_NAMED_SET_SETS' value='83'/>
+ </enum-decl>
+ <var-decl name='zfs_deleg_perm_tab' type-id='bc4e5d90' mangled-name='zfs_deleg_perm_tab' visibility='default' elf-symbol-id='zfs_deleg_perm_tab'/>
+ <function-decl name='zfs_deleg_whokey' mangled-name='zfs_deleg_whokey' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_whokey'>
+ <parameter type-id='26a90f95' name='attr'/>
+ <parameter type-id='36d4bd5a' name='type'/>
+ <parameter type-id='a84c031d' name='inheritchr'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_deleg_verify_nvlist' mangled-name='zfs_deleg_verify_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_verify_nvlist'>
+ <parameter type-id='5ce45b60' name='nvp'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_deleg_canonicalize_perm' mangled-name='zfs_deleg_canonicalize_perm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_canonicalize_perm'>
+ <parameter type-id='80f4b756' name='perm'/>
+ <return type-id='80f4b756'/>
+ </function-decl>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='256' id='85c64d26'>
+ <subrange length='4' type-id='7359adad' id='16fe7105'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='512' id='c5d13f42'>
+ <subrange length='8' type-id='7359adad' id='56e0c0b1'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='90dbb6d6' size-in-bits='2048' id='16582e69'>
+ <subrange length='4' type-id='7359adad' id='16fe7105'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='8240361c' size-in-bits='1024' id='481f90b1'>
+ <subrange length='4' type-id='7359adad' id='16fe7105'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='7c1ab40c' size-in-bits='512' id='cbd91ec1'>
+ <subrange length='4' type-id='7359adad' id='16fe7105'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='6d059eaa' size-in-bits='1024' id='729b6ebb'>
+ <subrange length='4' type-id='7359adad' id='16fe7105'/>
+ </array-type-def>
+ <typedef-decl name='zio_abd_checksum_func_t' type-id='3f8e8d11' id='c2eb138a'/>
+ <class-decl name='zio_abd_checksum_func' size-in-bits='192' is-struct='yes' visibility='default' id='aa14691a'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zpool_hdl' type-id='type-id-16' visibility='default'/>
+ <var-decl name='acf_init' type-id='0bcca125' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zpool_next' type-id='type-id-4' visibility='default'/>
+ <var-decl name='acf_fini' type-id='bfe36153' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zpool_name' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='zpool_state' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2240'>
- <var-decl name='zpool_config_size' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2304'>
- <var-decl name='zpool_config' type-id='type-id-19' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2368'>
- <var-decl name='zpool_old_config' type-id='type-id-19' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2432'>
- <var-decl name='zpool_props' type-id='type-id-19' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2496'>
- <var-decl name='zpool_start_block' type-id='type-id-20' visibility='default'/>
+ <var-decl name='acf_iter' type-id='1e276399' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='libzfs_handle_t' type-id='type-id-3' id='type-id-21'/>
- <pointer-type-def type-id='type-id-21' size-in-bits='64' id='type-id-16'/>
- <typedef-decl name='zpool_handle_t' type-id='type-id-15' id='type-id-22'/>
- <pointer-type-def type-id='type-id-22' size-in-bits='64' id='type-id-4'/>
- <type-decl name='char' size-in-bits='8' id='type-id-23'/>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-24'/>
-
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='2048' id='type-id-17'>
- <subrange length='256' type-id='type-id-24' id='type-id-25'/>
-
- </array-type-def>
- <typedef-decl name='size_t' type-id='type-id-24' id='type-id-18'/>
- <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-26'>
+ <typedef-decl name='zio_abd_checksum_init_t' type-id='a5444274' id='029a8ebe'/>
+ <typedef-decl name='zio_abd_checksum_data_t' type-id='4bf4b004' id='74e39470'/>
+ <class-decl name='zio_abd_checksum_data' size-in-bits='256' is-struct='yes' visibility='default' id='4bf4b004'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvl_version' type-id='type-id-27' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvl_nvflag' type-id='type-id-28' visibility='default'/>
+ <var-decl name='acd_byteorder' type-id='595a65ec' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvl_priv' type-id='type-id-7' visibility='default'/>
+ <var-decl name='acd_ctx' type-id='0f7df99e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvl_flag' type-id='type-id-28' visibility='default'/>
+ <var-decl name='acd_zcp' type-id='c24fc2ee' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='nvl_pad' type-id='type-id-27' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='acd_private' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__int32_t' type-id='type-id-2' id='type-id-29'/>
- <typedef-decl name='int32_t' type-id='type-id-29' id='type-id-27'/>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-30'/>
- <typedef-decl name='__uint32_t' type-id='type-id-30' id='type-id-31'/>
- <typedef-decl name='uint32_t' type-id='type-id-31' id='type-id-28'/>
- <typedef-decl name='__uint64_t' type-id='type-id-24' id='type-id-32'/>
- <typedef-decl name='uint64_t' type-id='type-id-32' id='type-id-7'/>
- <typedef-decl name='nvlist_t' type-id='type-id-26' id='type-id-33'/>
- <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-19'/>
- <type-decl name='long long int' size-in-bits='64' id='type-id-34'/>
- <typedef-decl name='longlong_t' type-id='type-id-34' id='type-id-35'/>
- <typedef-decl name='diskaddr_t' type-id='type-id-35' id='type-id-20'/>
- <class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-36'/>
- <typedef-decl name='uu_avl_pool_t' type-id='type-id-36' id='type-id-37'/>
- <pointer-type-def type-id='type-id-37' size-in-bits='64' id='type-id-5'/>
- <class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-38'/>
- <typedef-decl name='uu_avl_t' type-id='type-id-38' id='type-id-39'/>
- <pointer-type-def type-id='type-id-39' size-in-bits='64' id='type-id-6'/>
-
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='8192' id='type-id-8'>
- <subrange length='1024' type-id='type-id-24' id='type-id-40'/>
-
- </array-type-def>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-41'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-42'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='B_FALSE' value='0'/>
- <enumerator name='B_TRUE' value='1'/>
+ <typedef-decl name='zio_byteorder_t' type-id='08f5ca19' id='595a65ec'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca19'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZIO_CHECKSUM_NATIVE' value='0'/>
+ <enumerator name='ZIO_CHECKSUM_BYTESWAP' value='1'/>
</enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-42' id='type-id-9'/>
- <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-43'>
+ <typedef-decl name='fletcher_4_ctx_t' type-id='1f951ade' id='4b675395'/>
+ <union-decl name='fletcher_4_ctx' size-in-bits='2048' visibility='default' id='1f951ade'>
+ <data-member access='private'>
+ <var-decl name='scalar' type-id='39730d0b' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='superscalar' type-id='729b6ebb' visibility='default'/>
+ </data-member>
<data-member access='private'>
- <var-decl name='__data' type-id='type-id-44' visibility='default'/>
+ <var-decl name='sse' type-id='cbd91ec1' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__size' type-id='type-id-45' visibility='default'/>
+ <var-decl name='avx' type-id='481f90b1' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__align' type-id='type-id-46' visibility='default'/>
+ <var-decl name='avx512' type-id='16582e69' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-44'>
+ <typedef-decl name='zio_cksum_t' type-id='1d53e28b' id='39730d0b'/>
+ <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='1d53e28b'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__lock' type-id='type-id-2' visibility='default'/>
+ <var-decl name='zc_word' type-id='85c64d26' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__count' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__owner' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__nusers' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__kind' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='__spins' type-id='type-id-47' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='176'>
- <var-decl name='__elision' type-id='type-id-47' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__list' type-id='type-id-48' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='zfs_fletcher_superscalar_t' type-id='28efb250' id='6d059eaa'/>
+ <class-decl name='zfs_fletcher_superscalar' size-in-bits='256' is-struct='yes' visibility='default' id='28efb250'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='short int' size-in-bits='16' id='type-id-47'/>
- <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-49'>
+ <typedef-decl name='zfs_fletcher_sse_t' type-id='acd4019a' id='7c1ab40c'/>
+ <class-decl name='zfs_fletcher_sse' size-in-bits='128' is-struct='yes' visibility='default' id='acd4019a'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__prev' type-id='type-id-50' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__next' type-id='type-id-50' visibility='default'/>
+ <var-decl name='v' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-49' size-in-bits='64' id='type-id-50'/>
- <typedef-decl name='__pthread_list_t' type-id='type-id-49' id='type-id-48'/>
-
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='320' id='type-id-45'>
- <subrange length='40' type-id='type-id-24' id='type-id-51'/>
-
- </array-type-def>
- <type-decl name='long int' size-in-bits='64' id='type-id-46'/>
- <typedef-decl name='pthread_mutex_t' type-id='type-id-43' id='type-id-10'/>
- <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-52'>
+ <typedef-decl name='zfs_fletcher_avx_t' type-id='8c208dfa' id='8240361c'/>
+ <class-decl name='zfs_fletcher_avx' size-in-bits='256' is-struct='yes' visibility='default' id='8c208dfa'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_root' type-id='type-id-53' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='avl_compar' type-id='type-id-54' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_offset' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='avl_numnodes' type-id='type-id-55' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='avl_pad' type-id='type-id-18' visibility='default'/>
+ <var-decl name='v' type-id='85c64d26' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-56'>
+ <typedef-decl name='zfs_fletcher_avx512_t' type-id='c6d0c382' id='90dbb6d6'/>
+ <class-decl name='zfs_fletcher_avx512' size-in-bits='512' is-struct='yes' visibility='default' id='c6d0c382'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_child' type-id='type-id-57' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_pcb' type-id='type-id-58' visibility='default'/>
+ <var-decl name='v' type-id='c5d13f42' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-56' size-in-bits='64' id='type-id-53'/>
-
- <array-type-def dimensions='1' type-id='type-id-53' size-in-bits='128' id='type-id-57'>
- <subrange length='2' type-id='type-id-24' id='type-id-59'/>
-
+ <typedef-decl name='zio_abd_checksum_fini_t' type-id='a5444274' id='d6fd5c6c'/>
+ <typedef-decl name='zio_abd_checksum_iter_t' type-id='f4a1892e' id='cefa0f4a'/>
+ <qualified-type-def type-id='aa14691a' const='yes' id='3f8e8d11'/>
+ <pointer-type-def type-id='4b675395' size-in-bits='64' id='0f7df99e'/>
+ <pointer-type-def type-id='74e39470' size-in-bits='64' id='eefe7427'/>
+ <pointer-type-def type-id='d6fd5c6c' size-in-bits='64' id='bfe36153'/>
+ <pointer-type-def type-id='029a8ebe' size-in-bits='64' id='0bcca125'/>
+ <pointer-type-def type-id='cefa0f4a' size-in-bits='64' id='1e276399'/>
+ <pointer-type-def type-id='39730d0b' size-in-bits='64' id='c24fc2ee'/>
+ <var-decl name='fletcher_4_abd_ops' type-id='c2eb138a' mangled-name='fletcher_4_abd_ops' visibility='default' elf-symbol-id='fletcher_4_abd_ops'/>
+ <function-decl name='fletcher_4_fini' mangled-name='fletcher_4_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_fini'>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_4_init' mangled-name='fletcher_4_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_init'>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_byteswap'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native_varsize'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='9c313c2d' name='size'/>
+ <parameter type-id='c24fc2ee' name='zcp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_4_impl_set' mangled-name='fletcher_4_impl_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_impl_set'>
+ <parameter type-id='80f4b756' name='val'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='fletcher_2_byteswap' mangled-name='fletcher_2_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_byteswap'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='9c313c2d' name='size'/>
+ <parameter type-id='eaa32e2f' name='ctx_template'/>
+ <parameter type-id='c24fc2ee' name='zcp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_2_incremental_byteswap' mangled-name='fletcher_2_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_byteswap'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='fletcher_2_native' mangled-name='fletcher_2_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_native'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='9c313c2d' name='size'/>
+ <parameter type-id='eaa32e2f' name='ctx_template'/>
+ <parameter type-id='c24fc2ee' name='zcp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_2_incremental_native' mangled-name='fletcher_2_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_native'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='fletcher_init' mangled-name='fletcher_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_init'>
+ <parameter type-id='c24fc2ee' name='zcp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_4_native' mangled-name='fletcher_4_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='9c313c2d' name='size'/>
+ <parameter type-id='eaa32e2f' name='ctx_template'/>
+ <parameter type-id='c24fc2ee' name='zcp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_4_byteswap' mangled-name='fletcher_4_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_byteswap'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='9c313c2d' name='size'/>
+ <parameter type-id='eaa32e2f' name='ctx_template'/>
+ <parameter type-id='c24fc2ee' name='zcp'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_native'>
+ <parameter type-id='eaa32e2f' name='buf'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='f4a1892e'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='b59d7dce'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='a5444274'>
+ <parameter type-id='eefe7427'/>
+ <return type-id='48b5725f'/>
+ </function-type>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='128' id='c1c22e6c'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
</array-type-def>
- <typedef-decl name='uintptr_t' type-id='type-id-24' id='type-id-58'/>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-13'/>
- <pointer-type-def type-id='type-id-60' size-in-bits='64' id='type-id-54'/>
- <typedef-decl name='ulong_t' type-id='type-id-24' id='type-id-55'/>
- <typedef-decl name='avl_tree_t' type-id='type-id-52' id='type-id-11'/>
- <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-61'>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_avx512.c' language='LANG_C99'>
+ <typedef-decl name='fletcher_4_ops_t' type-id='57f479a0' id='eba91718'/>
+ <class-decl name='fletcher_4_func' size-in-bits='512' is-struct='yes' visibility='default' id='57f479a0'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='buffer' type-id='type-id-62' visibility='default'/>
+ <var-decl name='init_native' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='allocated' type-id='type-id-24' visibility='default'/>
+ <var-decl name='fini_native' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='used' type-id='type-id-24' visibility='default'/>
+ <var-decl name='compute_native' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='syntax' type-id='type-id-63' visibility='default'/>
+ <var-decl name='init_byteswap' type-id='b9ae1656' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fastmap' type-id='type-id-14' visibility='default'/>
+ <var-decl name='fini_byteswap' type-id='c4c1f4fc' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='translate' type-id='type-id-62' visibility='default'/>
+ <var-decl name='compute_byteswap' type-id='ad1dc4cb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='re_nsub' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='can_be_null' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='29'>
- <var-decl name='regs_allocated' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='28'>
- <var-decl name='fastmap_accurate' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='27'>
- <var-decl name='no_sub' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='26'>
- <var-decl name='not_bol' type-id='type-id-30' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='25'>
- <var-decl name='not_eol' type-id='type-id-30' visibility='default'/>
+ <var-decl name='valid' type-id='297d38bc' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='24'>
- <var-decl name='newline_anchor' type-id='type-id-30' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='name' type-id='80f4b756' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-64'/>
- <pointer-type-def type-id='type-id-64' size-in-bits='64' id='type-id-62'/>
- <typedef-decl name='reg_syntax_t' type-id='type-id-24' id='type-id-63'/>
- <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-14'/>
- <typedef-decl name='regex_t' type-id='type-id-61' id='type-id-12'/>
- <class-decl name='zfs_handle' size-in-bits='4928' is-struct='yes' visibility='default' id='type-id-65'>
+ <typedef-decl name='fletcher_4_init_f' type-id='173aa527' id='b9ae1656'/>
+ <typedef-decl name='fletcher_4_fini_f' type-id='0ad5b8a8' id='c4c1f4fc'/>
+ <typedef-decl name='fletcher_4_compute_f' type-id='38147eff' id='ad1dc4cb'/>
+ <qualified-type-def type-id='eba91718' const='yes' id='9eeabdc8'/>
+ <pointer-type-def type-id='e9e61702' size-in-bits='64' id='297d38bc'/>
+ <pointer-type-def type-id='fe40251b' size-in-bits='64' id='173aa527'/>
+ <pointer-type-def type-id='17fb1f83' size-in-bits='64' id='38147eff'/>
+ <pointer-type-def type-id='fb39e25e' size-in-bits='64' id='0ad5b8a8'/>
+ <var-decl name='fletcher_4_avx512f_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512f_ops' visibility='default' elf-symbol-id='fletcher_4_avx512f_ops'/>
+ <var-decl name='fletcher_4_avx512bw_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx512bw_ops' visibility='default' elf-symbol-id='fletcher_4_avx512bw_ops'/>
+ <function-type size-in-bits='64' id='e9e61702'>
+ <return type-id='c19b74c3'/>
+ </function-type>
+ <function-type size-in-bits='64' id='fe40251b'>
+ <parameter type-id='0f7df99e'/>
+ <return type-id='48b5725f'/>
+ </function-type>
+ <function-type size-in-bits='64' id='17fb1f83'>
+ <parameter type-id='0f7df99e'/>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='9c313c2d'/>
+ <return type-id='48b5725f'/>
+ </function-type>
+ <function-type size-in-bits='64' id='fb39e25e'>
+ <parameter type-id='0f7df99e'/>
+ <parameter type-id='c24fc2ee'/>
+ <return type-id='48b5725f'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_intel.c' language='LANG_C99'>
+ <var-decl name='fletcher_4_avx2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_avx2_ops' visibility='default' elf-symbol-id='fletcher_4_avx2_ops'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_sse.c' language='LANG_C99'>
+ <var-decl name='fletcher_4_sse2_ops' type-id='9eeabdc8' mangled-name='fletcher_4_sse2_ops' visibility='default' elf-symbol-id='fletcher_4_sse2_ops'/>
+ <var-decl name='fletcher_4_ssse3_ops' type-id='9eeabdc8' mangled-name='fletcher_4_ssse3_ops' visibility='default' elf-symbol-id='fletcher_4_ssse3_ops'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar.c' language='LANG_C99'>
+ <var-decl name='fletcher_4_superscalar_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar_ops'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar4.c' language='LANG_C99'>
+ <var-decl name='fletcher_4_superscalar4_ops' type-id='9eeabdc8' mangled-name='fletcher_4_superscalar4_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar4_ops'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_namecheck.c' language='LANG_C99'>
+ <typedef-decl name='namecheck_err_t' type-id='08f5ca1a' id='8e0af06e'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca1a'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='NAME_ERR_LEADING_SLASH' value='0'/>
+ <enumerator name='NAME_ERR_EMPTY_COMPONENT' value='1'/>
+ <enumerator name='NAME_ERR_TRAILING_SLASH' value='2'/>
+ <enumerator name='NAME_ERR_INVALCHAR' value='3'/>
+ <enumerator name='NAME_ERR_MULTIPLE_DELIMITERS' value='4'/>
+ <enumerator name='NAME_ERR_NOLETTER' value='5'/>
+ <enumerator name='NAME_ERR_RESERVED' value='6'/>
+ <enumerator name='NAME_ERR_DISKLIKE' value='7'/>
+ <enumerator name='NAME_ERR_TOOLONG' value='8'/>
+ <enumerator name='NAME_ERR_SELF_REF' value='9'/>
+ <enumerator name='NAME_ERR_PARENT_REF' value='10'/>
+ <enumerator name='NAME_ERR_NO_AT' value='11'/>
+ <enumerator name='NAME_ERR_NO_POUND' value='12'/>
+ </enum-decl>
+ <pointer-type-def type-id='8e0af06e' size-in-bits='64' id='053457bd'/>
+ <var-decl name='zfs_max_dataset_nesting' type-id='95e97e5e' mangled-name='zfs_max_dataset_nesting' visibility='default' elf-symbol-id='zfs_max_dataset_nesting'/>
+ <function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='pool_namecheck'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='053457bd' name='why'/>
+ <parameter type-id='26a90f95' name='what'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mountpoint_namecheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='053457bd' name='why'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='snapshot_namecheck' mangled-name='snapshot_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='snapshot_namecheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='053457bd' name='why'/>
+ <parameter type-id='26a90f95' name='what'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='bookmark_namecheck' mangled-name='bookmark_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='bookmark_namecheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='053457bd' name='why'/>
+ <parameter type-id='26a90f95' name='what'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='dataset_namecheck' mangled-name='dataset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_namecheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='053457bd' name='why'/>
+ <parameter type-id='26a90f95' name='what'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_nestcheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='permset_namecheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='053457bd' name='why'/>
+ <parameter type-id='26a90f95' name='what'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='get_dataset_depth' mangled-name='get_dataset_depth' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_dataset_depth'>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_component_namecheck' mangled-name='zfs_component_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_component_namecheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='053457bd' name='why'/>
+ <parameter type-id='26a90f95' name='what'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='entity_namecheck'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='053457bd' name='why'/>
+ <parameter type-id='26a90f95' name='what'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_prop.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='80f4b756' size-in-bits='768' id='35e4b367'>
+ <subrange length='12' type-id='7359adad' id='84827bdc'/>
+ </array-type-def>
+ <typedef-decl name='zprop_type_t' type-id='08f5ca1b' id='31429eff'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca1b'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='PROP_TYPE_NUMBER' value='0'/>
+ <enumerator name='PROP_TYPE_STRING' value='1'/>
+ <enumerator name='PROP_TYPE_INDEX' value='2'/>
+ </enum-decl>
+ <typedef-decl name='zprop_desc_t' type-id='686c4527' id='ffa52b96'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='704' is-struct='yes' is-anonymous='yes' naming-typedef-id='ffa52b96' visibility='default' id='686c4527'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zfs_hdl' type-id='type-id-16' visibility='default'/>
+ <var-decl name='pd_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zpool_hdl' type-id='type-id-4' visibility='default'/>
+ <var-decl name='pd_propnum' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='pd_proptype' type-id='31429eff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zfs_name' type-id='type-id-17' visibility='default'/>
+ <var-decl name='pd_strdefault' type-id='80f4b756' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='zfs_type' type-id='type-id-66' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='pd_numdefault' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2208'>
- <var-decl name='zfs_head_type' type-id='type-id-66' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='pd_attr' type-id='999701cc' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2240'>
- <var-decl name='zfs_dmustats' type-id='type-id-67' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='pd_types' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='4544'>
- <var-decl name='zfs_props' type-id='type-id-19' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='pd_values' type-id='80f4b756' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='4608'>
- <var-decl name='zfs_user_props' type-id='type-id-19' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='pd_colname' type-id='80f4b756' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='4672'>
- <var-decl name='zfs_recvd_props' type-id='type-id-19' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='pd_rightalign' type-id='c19b74c3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='4736'>
- <var-decl name='zfs_mntcheck' type-id='type-id-9' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='pd_visible' type-id='c19b74c3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='4800'>
- <var-decl name='zfs_mntopts' type-id='type-id-14' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='pd_zfs_mod_supported' type-id='c19b74c3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='4864'>
- <var-decl name='zfs_props_table' type-id='type-id-68' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='pd_table' type-id='c8bc397b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='pd_table_size' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-69'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZFS_TYPE_FILESYSTEM' value='1'/>
- <enumerator name='ZFS_TYPE_SNAPSHOT' value='2'/>
- <enumerator name='ZFS_TYPE_VOLUME' value='4'/>
- <enumerator name='ZFS_TYPE_POOL' value='8'/>
- <enumerator name='ZFS_TYPE_BOOKMARK' value='16'/>
+ <typedef-decl name='zprop_attr_t' type-id='40ed39d3' id='999701cc'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d3'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='PROP_DEFAULT' value='0'/>
+ <enumerator name='PROP_READONLY' value='1'/>
+ <enumerator name='PROP_INHERIT' value='2'/>
+ <enumerator name='PROP_ONETIME' value='3'/>
+ <enumerator name='PROP_ONETIME_DEFAULT' value='4'/>
</enum-decl>
- <typedef-decl name='zfs_type_t' type-id='type-id-69' id='type-id-66'/>
- <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='type-id-70'>
+ <typedef-decl name='zprop_index_t' type-id='87957af9' id='64636ce3'/>
+ <class-decl name='zfs_index' size-in-bits='128' is-struct='yes' visibility='default' id='87957af9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='dds_num_clones' type-id='type-id-7' visibility='default'/>
+ <var-decl name='pi_name' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='dds_creation_txg' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='dds_guid' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='dds_type' type-id='type-id-71' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='dds_is_snapshot' type-id='type-id-72' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='232'>
- <var-decl name='dds_inconsistent' type-id='type-id-72' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='dds_redacted' type-id='type-id-72' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='248'>
- <var-decl name='dds_origin' type-id='type-id-17' visibility='default'/>
+ <var-decl name='pi_value' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='dmu_objset_type' id='type-id-73'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='DMU_OST_NONE' value='0'/>
- <enumerator name='DMU_OST_META' value='1'/>
- <enumerator name='DMU_OST_ZFS' value='2'/>
- <enumerator name='DMU_OST_ZVOL' value='3'/>
- <enumerator name='DMU_OST_OTHER' value='4'/>
- <enumerator name='DMU_OST_ANY' value='5'/>
- <enumerator name='DMU_OST_NUMTYPES' value='6'/>
- </enum-decl>
- <typedef-decl name='dmu_objset_type_t' type-id='type-id-73' id='type-id-71'/>
- <typedef-decl name='__uint8_t' type-id='type-id-64' id='type-id-74'/>
- <typedef-decl name='uint8_t' type-id='type-id-74' id='type-id-72'/>
- <typedef-decl name='dmu_objset_stats_t' type-id='type-id-70' id='type-id-67'/>
- <pointer-type-def type-id='type-id-72' size-in-bits='64' id='type-id-68'/>
- <typedef-decl name='zfs_handle_t' type-id='type-id-65' id='type-id-75'/>
- <pointer-type-def type-id='type-id-75' size-in-bits='64' id='type-id-76'/>
- <pointer-type-def type-id='type-id-77' size-in-bits='64' id='type-id-78'/>
- <typedef-decl name='zfs_iter_f' type-id='type-id-78' id='type-id-79'/>
- <function-decl name='zfs_iter_root' mangled-name='zfs_iter_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_root'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <pointer-type-def type-id='type-id-80' size-in-bits='64' id='type-id-81'/>
- <typedef-decl name='zpool_iter_f' type-id='type-id-81' id='type-id-82'/>
- <function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_iter'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-82' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <qualified-type-def type-id='type-id-23' const='yes' id='type-id-83'/>
- <pointer-type-def type-id='type-id-83' size-in-bits='64' id='type-id-84'/>
- <function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
- <parameter type-id='type-id-84' name='poolname'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <pointer-type-def type-id='type-id-9' size-in-bits='64' id='type-id-85'/>
- <function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-85' name='missing'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_features'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <pointer-type-def type-id='type-id-19' size-in-bits='64' id='type-id-86'/>
- <function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_config'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-86' name='oldconfig'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <function-decl name='uu_avl_first' mangled-name='uu_avl_first' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='make_dataset_handle' mangled-name='make_dataset_handle' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_next' mangled-name='uu_avl_next' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_open_silent' mangled-name='zpool_open_silent' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='getenv' mangled-name='getenv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin___strcpy_chk' mangled-name='__strcpy_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin_memset' mangled-name='memset' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zcmd_alloc_dst_nvlist' mangled-name='zcmd_alloc_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__errno_location' mangled-name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zcmd_expand_dst_nvlist' mangled-name='zcmd_expand_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zcmd_read_dst_nvlist' mangled-name='zcmd_read_dst_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zcmd_free_nvlists' mangled-name='zcmd_free_nvlists' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='no_memory' mangled-name='no_memory' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_strdup' mangled-name='zfs_strdup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uu_avl_teardown' mangled-name='uu_avl_teardown' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-60'>
- <parameter type-id='type-id-13'/>
- <parameter type-id='type-id-13'/>
- <return type-id='type-id-2'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-77'>
- <parameter type-id='type-id-76'/>
- <parameter type-id='type-id-13'/>
- <return type-id='type-id-2'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-80'>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-13'/>
- <return type-id='type-id-2'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_crypto.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_crypto_rewrap' mangled-name='zfs_crypto_rewrap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_rewrap'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-19' name='raw_props'/>
- <parameter type-id='type-id-9' name='inheritkey'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_unload_key'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_load_key'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-9' name='noop'/>
- <parameter type-id='type-id-14' name='alt_keylocation'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_crypto_attempt_load_keys' mangled-name='zfs_crypto_attempt_load_keys' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_attempt_load_keys'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-14' name='fsname'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_clone_check'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-76' name='origin_zhp'/>
- <parameter type-id='type-id-14' name='parent_name'/>
- <parameter type-id='type-id-19' name='props'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <pointer-type-def type-id='type-id-68' size-in-bits='64' id='type-id-87'/>
- <typedef-decl name='uint_t' type-id='type-id-30' id='type-id-88'/>
- <pointer-type-def type-id='type-id-88' size-in-bits='64' id='type-id-89'/>
- <function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_create'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-14' name='parent_name'/>
- <parameter type-id='type-id-19' name='props'/>
- <parameter type-id='type-id-19' name='pool_props'/>
- <parameter type-id='type-id-9' name='stdin_available'/>
- <parameter type-id='type-id-87' name='wkeydata_out'/>
- <parameter type-id='type-id-89' name='wkeylen_out'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_get_encryption_root'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-85' name='is_encroot'/>
- <parameter type-id='type-id-14' name='buf'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='__builtin___snprintf_chk' mangled-name='__snprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_error_aux' mangled-name='zfs_error_aux' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_change_key' mangled-name='lzc_change_key' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <pointer-type-def type-id='80f4b756' size-in-bits='64' id='7d3cd834'/>
+ <qualified-type-def type-id='64636ce3' const='yes' id='072f7953'/>
+ <pointer-type-def type-id='072f7953' size-in-bits='64' id='c8bc397b'/>
+ <pointer-type-def type-id='ffa52b96' size-in-bits='64' id='76c8174b'/>
+ <var-decl name='zfs_userquota_prop_prefixes' type-id='35e4b367' mangled-name='zfs_userquota_prop_prefixes' visibility='default' elf-symbol-id='zfs_userquota_prop_prefixes'/>
+ <function-decl name='zfs_prop_align_right' mangled-name='zfs_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_align_right'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_column_name' mangled-name='zfs_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_column_name'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_is_string' mangled-name='zfs_prop_is_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_is_string'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_unload_key' mangled-name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_values' mangled-name='zfs_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_values'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='lzc_load_key' mangled-name='lzc_load_key' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_keylocation'>
+ <parameter type-id='80f4b756' name='str'/>
+ <parameter type-id='c19b74c3' name='encrypted'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_encryption_key_param'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inheritable'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_to_name'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='__builtin_memcpy' mangled-name='memcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_numeric'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_string'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='regexec' mangled-name='regexec' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_setonce'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='fileno' mangled-name='fileno' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_visible' mangled-name='zfs_prop_visible' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_visible'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='isatty' mangled-name='isatty' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_readonly'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='__builtin_putchar' mangled-name='putchar' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_type'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='31429eff'/>
</function-decl>
- <function-decl name='__getdelim' mangled-name='__getdelim' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_for_type'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='2e45de5d' name='types'/>
+ <parameter type-id='c19b74c3' name='headcheck'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='sigemptyset' mangled-name='sigemptyset' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_random_value' mangled-name='zfs_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_random_value'>
+ <parameter type-id='58603c44' name='prop'/>
+ <parameter type-id='9c313c2d' name='seed'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='sigaction' mangled-name='sigaction' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_index_to_string'>
+ <parameter type-id='58603c44' name='prop'/>
+ <parameter type-id='9c313c2d' name='index'/>
+ <parameter type-id='7d3cd834' name='string'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_string_to_index' mangled-name='zfs_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_string_to_index'>
+ <parameter type-id='58603c44' name='prop'/>
+ <parameter type-id='80f4b756' name='string'/>
+ <parameter type-id='5d6479ae' name='index'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fflush' mangled-name='fflush' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_written'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='tcgetattr' mangled-name='tcgetattr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_userquota'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='tcsetattr' mangled-name='tcsetattr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_user'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='getpid' mangled-name='getpid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_to_prop'>
+ <parameter type-id='80f4b756' name='propname'/>
+ <return type-id='58603c44'/>
</function-decl>
- <function-decl name='kill' mangled-name='kill' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_delegatable'>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='__ctype_b_loc' mangled-name='__ctype_b_loc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_init' mangled-name='zfs_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_init'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_table'>
+ <return type-id='76c8174b'/>
</function-decl>
- <function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__fread_alias' mangled-name='fread' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='malloc' mangled-name='malloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='ferror' mangled-name='ferror' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fopen' mangled-name='fopen64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fclose' mangled-name='fclose' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin_memmove' mangled-name='memmove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sscanf' mangled-name='sscanf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='PKCS5_PBKDF2_HMAC_SHA1' mangled-name='PKCS5_PBKDF2_HMAC_SHA1' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__open_alias' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__read_alias' mangled-name='read' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='close' mangled-name='close' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin_strcpy' mangled-name='strcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_dataset.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-90'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
- <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
- </enum-decl>
- <typedef-decl name='zfs_wait_activity_t' type-id='type-id-90' id='type-id-91'/>
- <function-decl name='zfs_wait_status' mangled-name='zfs_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_wait_status'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-91' name='activity'/>
- <parameter type-id='type-id-85' name='missing'/>
- <parameter type-id='type-id-85' name='waited'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zvol_volsize_to_reservation' mangled-name='zvol_volsize_to_reservation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zvol_volsize_to_reservation'>
- <parameter type-id='type-id-4' name='zph'/>
- <parameter type-id='type-id-7' name='volsize'/>
- <parameter type-id='type-id-19' name='props'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zfs_get_holds' mangled-name='zfs_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_holds'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-86' name='nvl'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_set_fsacl' mangled-name='zfs_set_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_set_fsacl'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-9' name='un'/>
- <parameter type-id='type-id-19' name='nvl'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_get_fsacl' mangled-name='zfs_get_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_fsacl'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-86' name='nvl'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_release' mangled-name='zfs_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_release'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='snapname'/>
- <parameter type-id='type-id-84' name='tag'/>
- <parameter type-id='type-id-9' name='recursive'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold_nvl'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-2' name='cleanup_fd'/>
- <parameter type-id='type-id-19' name='holds'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_hold' mangled-name='zfs_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='snapname'/>
- <parameter type-id='type-id-84' name='tag'/>
- <parameter type-id='type-id-9' name='recursive'/>
- <parameter type-id='type-id-2' name='cleanup_fd'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-92'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZFS_PROP_USERUSED' value='0'/>
- <enumerator name='ZFS_PROP_USERQUOTA' value='1'/>
- <enumerator name='ZFS_PROP_GROUPUSED' value='2'/>
- <enumerator name='ZFS_PROP_GROUPQUOTA' value='3'/>
- <enumerator name='ZFS_PROP_USEROBJUSED' value='4'/>
- <enumerator name='ZFS_PROP_USEROBJQUOTA' value='5'/>
- <enumerator name='ZFS_PROP_GROUPOBJUSED' value='6'/>
- <enumerator name='ZFS_PROP_GROUPOBJQUOTA' value='7'/>
- <enumerator name='ZFS_PROP_PROJECTUSED' value='8'/>
- <enumerator name='ZFS_PROP_PROJECTQUOTA' value='9'/>
- <enumerator name='ZFS_PROP_PROJECTOBJUSED' value='10'/>
- <enumerator name='ZFS_PROP_PROJECTOBJQUOTA' value='11'/>
- <enumerator name='ZFS_NUM_USERQUOTA_PROPS' value='12'/>
+ <typedef-decl name='zfs_prop_t' type-id='3fed383f' id='58603c44'/>
+ <typedef-decl name='zfs_type_t' type-id='40ed39d4' id='2e45de5d'/>
+ <pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d4'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_TYPE_FILESYSTEM' value='1'/>
+ <enumerator name='ZFS_TYPE_SNAPSHOT' value='2'/>
+ <enumerator name='ZFS_TYPE_VOLUME' value='4'/>
+ <enumerator name='ZFS_TYPE_POOL' value='8'/>
+ <enumerator name='ZFS_TYPE_BOOKMARK' value='16'/>
</enum-decl>
- <typedef-decl name='zfs_userquota_prop_t' type-id='type-id-92' id='type-id-93'/>
- <typedef-decl name='__uid_t' type-id='type-id-30' id='type-id-94'/>
- <typedef-decl name='uid_t' type-id='type-id-94' id='type-id-95'/>
- <pointer-type-def type-id='type-id-96' size-in-bits='64' id='type-id-97'/>
- <typedef-decl name='zfs_userspace_cb_t' type-id='type-id-97' id='type-id-98'/>
- <function-decl name='zfs_userspace' mangled-name='zfs_userspace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userspace'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-93' name='type'/>
- <parameter type-id='type-id-98' name='func'/>
- <parameter type-id='type-id-13' name='arg'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_smb_acl_rename' mangled-name='zfs_smb_acl_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_rename'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-14' name='dataset'/>
- <parameter type-id='type-id-14' name='path'/>
- <parameter type-id='type-id-14' name='oldname'/>
- <parameter type-id='type-id-14' name='newname'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_smb_acl_purge' mangled-name='zfs_smb_acl_purge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_purge'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-14' name='dataset'/>
- <parameter type-id='type-id-14' name='path'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_smb_acl_remove' mangled-name='zfs_smb_acl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_remove'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-14' name='dataset'/>
- <parameter type-id='type-id-14' name='path'/>
- <parameter type-id='type-id-14' name='resource'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_smb_acl_add' mangled-name='zfs_smb_acl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_add'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-14' name='dataset'/>
- <parameter type-id='type-id-14' name='path'/>
- <parameter type-id='type-id-14' name='resource'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_prune_proplist' mangled-name='zfs_prune_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prune_proplist'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-68' name='props'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='zprop_list' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-99'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pl_prop' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pl_user_prop' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='pl_next' type-id='type-id-100' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='pl_all' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='pl_width' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='pl_recvd_width' type-id='type-id-18' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='pl_fixed' type-id='type-id-9' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-99' size-in-bits='64' id='type-id-100'/>
- <typedef-decl name='zprop_list_t' type-id='type-id-99' id='type-id-101'/>
- <pointer-type-def type-id='type-id-101' size-in-bits='64' id='type-id-102'/>
- <pointer-type-def type-id='type-id-102' size-in-bits='64' id='type-id-103'/>
- <function-decl name='zfs_expand_proplist' mangled-name='zfs_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_expand_proplist'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-103' name='plp'/>
- <parameter type-id='type-id-9' name='received'/>
- <parameter type-id='type-id-9' name='literal'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_get_user_props' mangled-name='zfs_get_user_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_user_props'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_recvd_props'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <function-decl name='zfs_get_all_props' mangled-name='zfs_get_all_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_all_props'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <class-decl name='renameflags' size-in-bits='32' is-struct='yes' visibility='default' id='type-id-104'>
- <data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='recursive' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='30'>
- <var-decl name='nounmount' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='29'>
- <var-decl name='forceunmount' type-id='type-id-2' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='renameflags_t' type-id='type-id-104' id='type-id-105'/>
- <function-decl name='zfs_rename' mangled-name='zfs_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rename'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='target'/>
- <parameter type-id='type-id-105' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_rollback' mangled-name='zfs_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rollback'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-76' name='snap'/>
- <parameter type-id='type-id-9' name='force'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_snapshot' mangled-name='zfs_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-9' name='recursive'/>
- <parameter type-id='type-id-19' name='props'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_snapshot_nvl' mangled-name='zfs_snapshot_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot_nvl'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-19' name='snaps'/>
- <parameter type-id='type-id-19' name='props'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_promote' mangled-name='zfs_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_promote'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_clone' mangled-name='zfs_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_clone'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='target'/>
- <parameter type-id='type-id-19' name='props'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_destroy_snaps_nvl' mangled-name='zfs_destroy_snaps_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-19' name='snaps'/>
- <parameter type-id='type-id-9' name='defer'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_destroy_snaps' mangled-name='zfs_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-14' name='snapname'/>
- <parameter type-id='type-id-9' name='defer'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_destroy' mangled-name='zfs_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-9' name='defer'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_create' mangled-name='zfs_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-66' name='type'/>
- <parameter type-id='type-id-19' name='props'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_create_ancestors' mangled-name='zfs_create_ancestors' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create_ancestors'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parent_name'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-14' name='buf'/>
- <parameter type-id='type-id-18' name='buflen'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <qualified-type-def type-id='type-id-75' const='yes' id='type-id-106'/>
- <pointer-type-def type-id='type-id-106' size-in-bits='64' id='type-id-107'/>
- <function-decl name='zfs_get_underlying_type' mangled-name='zfs_get_underlying_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_type'>
- <parameter type-id='type-id-107' name='zhp'/>
- <return type-id='type-id-66'/>
- </function-decl>
- <function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_type'>
- <parameter type-id='type-id-107' name='zhp'/>
- <return type-id='type-id-66'/>
- </function-decl>
- <function-decl name='zfs_get_pool_name' mangled-name='zfs_get_pool_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_name'>
- <parameter type-id='type-id-107' name='zhp'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_name'>
- <parameter type-id='type-id-107' name='zhp'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_prop_get_written' mangled-name='zfs_prop_get_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-14' name='propbuf'/>
- <parameter type-id='type-id-2' name='proplen'/>
- <parameter type-id='type-id-9' name='literal'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <pointer-type-def type-id='type-id-7' size-in-bits='64' id='type-id-108'/>
- <function-decl name='zfs_prop_get_written_int' mangled-name='zfs_prop_get_written_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written_int'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-108' name='propvalue'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_prop_get_userquota' mangled-name='zfs_prop_get_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-14' name='propbuf'/>
- <parameter type-id='type-id-2' name='proplen'/>
- <parameter type-id='type-id-9' name='literal'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_prop_get_userquota_int' mangled-name='zfs_prop_get_userquota_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota_int'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-108' name='propvalue'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-109'>
- <underlying-type type-id='type-id-41'/>
+ <enum-decl name='__anonymous_enum__2' is-anonymous='yes' id='3fed383f'>
+ <underlying-type type-id='9cac1fee'/>
<enumerator name='ZPROP_CONT' value='-2'/>
<enumerator name='ZPROP_INVAL' value='-1'/>
<enumerator name='ZFS_PROP_TYPE' value='0'/>
<enumerator name='ZFS_PROP_CREATION' value='1'/>
<enumerator name='ZFS_PROP_USED' value='2'/>
<enumerator name='ZFS_PROP_AVAILABLE' value='3'/>
<enumerator name='ZFS_PROP_REFERENCED' value='4'/>
<enumerator name='ZFS_PROP_COMPRESSRATIO' value='5'/>
<enumerator name='ZFS_PROP_MOUNTED' value='6'/>
<enumerator name='ZFS_PROP_ORIGIN' value='7'/>
<enumerator name='ZFS_PROP_QUOTA' value='8'/>
<enumerator name='ZFS_PROP_RESERVATION' value='9'/>
<enumerator name='ZFS_PROP_VOLSIZE' value='10'/>
<enumerator name='ZFS_PROP_VOLBLOCKSIZE' value='11'/>
<enumerator name='ZFS_PROP_RECORDSIZE' value='12'/>
<enumerator name='ZFS_PROP_MOUNTPOINT' value='13'/>
<enumerator name='ZFS_PROP_SHARENFS' value='14'/>
<enumerator name='ZFS_PROP_CHECKSUM' value='15'/>
<enumerator name='ZFS_PROP_COMPRESSION' value='16'/>
<enumerator name='ZFS_PROP_ATIME' value='17'/>
<enumerator name='ZFS_PROP_DEVICES' value='18'/>
<enumerator name='ZFS_PROP_EXEC' value='19'/>
<enumerator name='ZFS_PROP_SETUID' value='20'/>
<enumerator name='ZFS_PROP_READONLY' value='21'/>
<enumerator name='ZFS_PROP_ZONED' value='22'/>
<enumerator name='ZFS_PROP_SNAPDIR' value='23'/>
<enumerator name='ZFS_PROP_ACLMODE' value='24'/>
<enumerator name='ZFS_PROP_ACLINHERIT' value='25'/>
<enumerator name='ZFS_PROP_CREATETXG' value='26'/>
<enumerator name='ZFS_PROP_NAME' value='27'/>
<enumerator name='ZFS_PROP_CANMOUNT' value='28'/>
<enumerator name='ZFS_PROP_ISCSIOPTIONS' value='29'/>
<enumerator name='ZFS_PROP_XATTR' value='30'/>
<enumerator name='ZFS_PROP_NUMCLONES' value='31'/>
<enumerator name='ZFS_PROP_COPIES' value='32'/>
<enumerator name='ZFS_PROP_VERSION' value='33'/>
<enumerator name='ZFS_PROP_UTF8ONLY' value='34'/>
<enumerator name='ZFS_PROP_NORMALIZE' value='35'/>
<enumerator name='ZFS_PROP_CASE' value='36'/>
<enumerator name='ZFS_PROP_VSCAN' value='37'/>
<enumerator name='ZFS_PROP_NBMAND' value='38'/>
<enumerator name='ZFS_PROP_SHARESMB' value='39'/>
<enumerator name='ZFS_PROP_REFQUOTA' value='40'/>
<enumerator name='ZFS_PROP_REFRESERVATION' value='41'/>
<enumerator name='ZFS_PROP_GUID' value='42'/>
<enumerator name='ZFS_PROP_PRIMARYCACHE' value='43'/>
<enumerator name='ZFS_PROP_SECONDARYCACHE' value='44'/>
<enumerator name='ZFS_PROP_USEDSNAP' value='45'/>
<enumerator name='ZFS_PROP_USEDDS' value='46'/>
<enumerator name='ZFS_PROP_USEDCHILD' value='47'/>
<enumerator name='ZFS_PROP_USEDREFRESERV' value='48'/>
<enumerator name='ZFS_PROP_USERACCOUNTING' value='49'/>
<enumerator name='ZFS_PROP_STMF_SHAREINFO' value='50'/>
<enumerator name='ZFS_PROP_DEFER_DESTROY' value='51'/>
<enumerator name='ZFS_PROP_USERREFS' value='52'/>
<enumerator name='ZFS_PROP_LOGBIAS' value='53'/>
<enumerator name='ZFS_PROP_UNIQUE' value='54'/>
<enumerator name='ZFS_PROP_OBJSETID' value='55'/>
<enumerator name='ZFS_PROP_DEDUP' value='56'/>
<enumerator name='ZFS_PROP_MLSLABEL' value='57'/>
<enumerator name='ZFS_PROP_SYNC' value='58'/>
<enumerator name='ZFS_PROP_DNODESIZE' value='59'/>
<enumerator name='ZFS_PROP_REFRATIO' value='60'/>
<enumerator name='ZFS_PROP_WRITTEN' value='61'/>
<enumerator name='ZFS_PROP_CLONES' value='62'/>
<enumerator name='ZFS_PROP_LOGICALUSED' value='63'/>
<enumerator name='ZFS_PROP_LOGICALREFERENCED' value='64'/>
<enumerator name='ZFS_PROP_INCONSISTENT' value='65'/>
<enumerator name='ZFS_PROP_VOLMODE' value='66'/>
<enumerator name='ZFS_PROP_FILESYSTEM_LIMIT' value='67'/>
<enumerator name='ZFS_PROP_SNAPSHOT_LIMIT' value='68'/>
<enumerator name='ZFS_PROP_FILESYSTEM_COUNT' value='69'/>
<enumerator name='ZFS_PROP_SNAPSHOT_COUNT' value='70'/>
<enumerator name='ZFS_PROP_SNAPDEV' value='71'/>
<enumerator name='ZFS_PROP_ACLTYPE' value='72'/>
<enumerator name='ZFS_PROP_SELINUX_CONTEXT' value='73'/>
<enumerator name='ZFS_PROP_SELINUX_FSCONTEXT' value='74'/>
<enumerator name='ZFS_PROP_SELINUX_DEFCONTEXT' value='75'/>
<enumerator name='ZFS_PROP_SELINUX_ROOTCONTEXT' value='76'/>
<enumerator name='ZFS_PROP_RELATIME' value='77'/>
<enumerator name='ZFS_PROP_REDUNDANT_METADATA' value='78'/>
<enumerator name='ZFS_PROP_OVERLAY' value='79'/>
<enumerator name='ZFS_PROP_PREV_SNAP' value='80'/>
<enumerator name='ZFS_PROP_RECEIVE_RESUME_TOKEN' value='81'/>
<enumerator name='ZFS_PROP_ENCRYPTION' value='82'/>
<enumerator name='ZFS_PROP_KEYLOCATION' value='83'/>
<enumerator name='ZFS_PROP_KEYFORMAT' value='84'/>
<enumerator name='ZFS_PROP_PBKDF2_SALT' value='85'/>
<enumerator name='ZFS_PROP_PBKDF2_ITERS' value='86'/>
<enumerator name='ZFS_PROP_ENCRYPTION_ROOT' value='87'/>
<enumerator name='ZFS_PROP_KEY_GUID' value='88'/>
<enumerator name='ZFS_PROP_KEYSTATUS' value='89'/>
<enumerator name='ZFS_PROP_REMAPTXG' value='90'/>
<enumerator name='ZFS_PROP_SPECIAL_SMALL_BLOCKS' value='91'/>
<enumerator name='ZFS_PROP_IVSET_GUID' value='92'/>
<enumerator name='ZFS_PROP_REDACTED' value='93'/>
<enumerator name='ZFS_PROP_REDACT_SNAPS' value='94'/>
<enumerator name='ZFS_NUM_PROPS' value='95'/>
</enum-decl>
- <typedef-decl name='zfs_prop_t' type-id='type-id-109' id='type-id-110'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-111'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZPROP_SRC_NONE' value='1'/>
- <enumerator name='ZPROP_SRC_DEFAULT' value='2'/>
- <enumerator name='ZPROP_SRC_TEMPORARY' value='4'/>
- <enumerator name='ZPROP_SRC_LOCAL' value='8'/>
- <enumerator name='ZPROP_SRC_INHERITED' value='16'/>
- <enumerator name='ZPROP_SRC_RECEIVED' value='32'/>
- </enum-decl>
- <typedef-decl name='zprop_source_t' type-id='type-id-111' id='type-id-112'/>
- <pointer-type-def type-id='type-id-112' size-in-bits='64' id='type-id-113'/>
- <function-decl name='zfs_prop_get_numeric' mangled-name='zfs_prop_get_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_numeric'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-110' name='prop'/>
- <parameter type-id='type-id-108' name='value'/>
- <parameter type-id='type-id-113' name='src'/>
- <parameter type-id='type-id-14' name='statbuf'/>
- <parameter type-id='type-id-18' name='statlen'/>
- <return type-id='type-id-2'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zpool_prop.c' language='LANG_C99'>
+ <function-decl name='zpool_prop_align_right' mangled-name='zpool_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_align_right'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_int'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zpool_prop_column_name' mangled-name='zpool_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_column_name'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-110' name='prop'/>
- <parameter type-id='type-id-14' name='propbuf'/>
- <parameter type-id='type-id-18' name='proplen'/>
- <parameter type-id='type-id-113' name='src'/>
- <parameter type-id='type-id-14' name='statbuf'/>
- <parameter type-id='type-id-18' name='statlen'/>
- <parameter type-id='type-id-9' name='literal'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_prop_values' mangled-name='zpool_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_values'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_clones_nvl'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-19'/>
+ <function-decl name='zpool_prop_random_value' mangled-name='zpool_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_random_value'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <parameter type-id='9c313c2d' name='seed'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='zfs_prop_get_recvd' mangled-name='zfs_prop_get_recvd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_recvd'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-14' name='propbuf'/>
- <parameter type-id='type-id-18' name='proplen'/>
- <parameter type-id='type-id-9' name='literal'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_index_to_string'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <parameter type-id='9c313c2d' name='index'/>
+ <parameter type-id='7d3cd834' name='string'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_prop_inherit' mangled-name='zfs_prop_inherit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inherit'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-9' name='received'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_prop_string_to_index' mangled-name='zpool_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_string_to_index'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <parameter type-id='80f4b756' name='string'/>
+ <parameter type-id='5d6479ae' name='index'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_prop_set_list' mangled-name='zfs_prop_set_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-19' name='props'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_unsupported'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-84' name='propval'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_feature'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_valid_proplist'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-66' name='type'/>
- <parameter type-id='type-id-19' name='nvl'/>
- <parameter type-id='type-id-7' name='zoned'/>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-4' name='zpool_hdl'/>
- <parameter type-id='type-id-9' name='key_params_ok'/>
- <parameter type-id='type-id-84' name='errbuf'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-114'/>
- <function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-114' name='spa_version'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_numeric'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_remove'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='fsname'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_string'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_add'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='special'/>
- <parameter type-id='type-id-84' name='mountp'/>
- <parameter type-id='type-id-84' name='mntopts'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_setonce'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='libzfs_mnttab_cache' mangled-name='libzfs_mnttab_cache' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_cache'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-9' name='enable'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_readonly'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_fini'>
- <parameter type-id='type-id-16' name='hdl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_type'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='31429eff'/>
</function-decl>
- <function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_init'>
- <parameter type-id='type-id-16' name='hdl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_to_name'>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_close'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_to_prop'>
+ <parameter type-id='80f4b756' name='propname'/>
+ <return type-id='5d0c23fb'/>
</function-decl>
- <function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_open'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-2' name='types'/>
- <return type-id='type-id-76'/>
+ <function-decl name='zpool_prop_init' mangled-name='zpool_prop_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_init'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_bookmark_exists' mangled-name='zfs_bookmark_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_bookmark_exists'>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-9'/>
+ <function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_table'>
+ <return type-id='76c8174b'/>
</function-decl>
- <function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_handle_dup'>
- <parameter type-id='type-id-76' name='zhp_orig'/>
- <return type-id='type-id-76'/>
+ <typedef-decl name='zpool_prop_t' type-id='40ed39d5' id='5d0c23fb'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d5'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
+ <enumerator name='ZPOOL_PROP_NAME' value='0'/>
+ <enumerator name='ZPOOL_PROP_SIZE' value='1'/>
+ <enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
+ <enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
+ <enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
+ <enumerator name='ZPOOL_PROP_GUID' value='5'/>
+ <enumerator name='ZPOOL_PROP_VERSION' value='6'/>
+ <enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
+ <enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
+ <enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
+ <enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
+ <enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
+ <enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
+ <enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
+ <enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
+ <enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
+ <enumerator name='ZPOOL_PROP_FREE' value='16'/>
+ <enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
+ <enumerator name='ZPOOL_PROP_READONLY' value='18'/>
+ <enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
+ <enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
+ <enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
+ <enumerator name='ZPOOL_PROP_FREEING' value='22'/>
+ <enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
+ <enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
+ <enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
+ <enumerator name='ZPOOL_PROP_TNAME' value='26'/>
+ <enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
+ <enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
+ <enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
+ <enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
+ <enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
+ <enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
+ <enumerator name='ZPOOL_NUM_PROPS' value='33'/>
+ </enum-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zprop_common.c' language='LANG_C99'>
+ <function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_width'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='37e3bd22' name='fixed'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='b59d7dce'/>
</function-decl>
- <function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_refresh_properties'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_for_type'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <parameter type-id='c19b74c3' name='headcheck'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_free_handles'>
- <parameter type-id='type-id-16' name='hdl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_values'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_valid'>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_random_value'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='9c313c2d' name='seed'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='zfs_type_to_name' mangled-name='zfs_type_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_type_to_name'>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_index_to_string'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='9c313c2d' name='index'/>
+ <parameter type-id='7d3cd834' name='string'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_string_to_index'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='80f4b756' name='string'/>
+ <parameter type-id='5d6479ae' name='index'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_name_to_prop'>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-115'>
+ <function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter_common'>
+ <parameter type-id='1ec3747a' name='func'/>
+ <parameter type-id='eaa32e2f' name='cb'/>
+ <parameter type-id='c19b74c3' name='show_all'/>
+ <parameter type-id='c19b74c3' name='ordered'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_hidden'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='31429eff' name='type'/>
+ <parameter type-id='999701cc' name='attr'/>
+ <parameter type-id='95e97e5e' name='objset_types'/>
+ <parameter type-id='80f4b756' name='colname'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_index'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9c313c2d' name='def'/>
+ <parameter type-id='999701cc' name='attr'/>
+ <parameter type-id='95e97e5e' name='objset_types'/>
+ <parameter type-id='80f4b756' name='values'/>
+ <parameter type-id='80f4b756' name='colname'/>
+ <parameter type-id='c8bc397b' name='idx_tbl'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_number'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='9c313c2d' name='def'/>
+ <parameter type-id='999701cc' name='attr'/>
+ <parameter type-id='95e97e5e' name='objset_types'/>
+ <parameter type-id='80f4b756' name='values'/>
+ <parameter type-id='80f4b756' name='colname'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_string'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='80f4b756' name='def'/>
+ <parameter type-id='999701cc' name='attr'/>
+ <parameter type-id='95e97e5e' name='objset_types'/>
+ <parameter type-id='80f4b756' name='values'/>
+ <parameter type-id='80f4b756' name='colname'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_impl'>
+ <parameter type-id='95e97e5e' name='prop'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='31429eff' name='type'/>
+ <parameter type-id='9c313c2d' name='numdefault'/>
+ <parameter type-id='80f4b756' name='strdefault'/>
+ <parameter type-id='999701cc' name='attr'/>
+ <parameter type-id='95e97e5e' name='objset_types'/>
+ <parameter type-id='80f4b756' name='values'/>
+ <parameter type-id='80f4b756' name='colname'/>
+ <parameter type-id='c19b74c3' name='rightalign'/>
+ <parameter type-id='c19b74c3' name='visible'/>
+ <parameter type-id='c8bc397b' name='idx_tbl'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
+ <typedef-decl name='zprop_func' type-id='2e711a2a' id='1ec3747a'/>
+ <pointer-type-def type-id='c70fa2e8' size-in-bits='64' id='2e711a2a'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_changelist.c' language='LANG_C99'>
+ <type-decl name='void' id='48b5725f'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_config.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8192' id='b54ce520'>
+ <subrange length='1024' type-id='7359adad' id='c60446f8'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='2048' id='d1617432'>
+ <subrange length='256' type-id='7359adad' id='36e5b9fa'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='320' id='36c46961'>
+ <subrange length='40' type-id='7359adad' id='8f80b239'/>
+ </array-type-def>
+ <class-decl name='uu_avl' is-struct='yes' visibility='default' is-declaration-only='yes' id='4af029d1'/>
+ <class-decl name='uu_avl_pool' is-struct='yes' visibility='default' is-declaration-only='yes' id='12a530a8'/>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
+ <type-decl name='short int' size-in-bits='16' id='a2185560'/>
+ <type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <class-decl name='libzfs_handle' size-in-bits='18240' is-struct='yes' visibility='default' id='c8a9d9d8'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-14' visibility='default'/>
+ <var-decl name='libzfs_error' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='libzfs_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-14' visibility='default'/>
+ <var-decl name='libzfs_pool_handles' type-id='4c81de99' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-14' visibility='default'/>
+ <var-decl name='libzfs_ns_avlpool' type-id='de82c773' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-14' visibility='default'/>
+ <var-decl name='libzfs_ns_avl' type-id='a5c21a38' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-115' size-in-bits='64' id='type-id-116'/>
- <function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_find'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='fsname'/>
- <parameter type-id='type-id-116' name='entry'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-117'/>
- <function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getprop_uint64'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-110' name='prop'/>
- <parameter type-id='type-id-117' name='source'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_exists'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-66' name='types'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='lzc_wait_fs' mangled-name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_standard_error_fmt' mangled-name='zfs_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strtol' mangled-name='strtol' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_get_holds' mangled-name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_size' mangled-name='nvlist_size' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_pack' mangled-name='nvlist_pack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strerror' mangled-name='strerror' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_release' mangled-name='lzc_release' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvpair_value_int32' mangled-name='fnvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_hold' mangled-name='lzc_hold' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='ioctl' mangled-name='ioctl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zcmd_write_src_nvlist' mangled-name='zcmd_write_src_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_type' mangled-name='nvpair_type' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_expand_list' mangled-name='zprop_expand_list' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_gather' mangled-name='changelist_gather' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_haszonedchild' mangled-name='changelist_haszonedchild' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_free' mangled-name='changelist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_rename' mangled-name='changelist_rename' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_postfix' mangled-name='changelist_postfix' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_prefix' mangled-name='changelist_prefix' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_rollback_to' mangled-name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_remove' mangled-name='changelist_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strcspn' mangled-name='strcspn' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_snapshot' mangled-name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_promote' mangled-name='lzc_promote' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_clone' mangled-name='lzc_clone' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_destroy_snaps' mangled-name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_destroy_bookmarks' mangled-name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_destroy' mangled-name='lzc_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_create' mangled-name='lzc_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_commit_all_shares' mangled-name='zfs_commit_all_shares' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin___strncpy_chk' mangled-name='__strncpy_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strrchr' mangled-name='strrchr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_error_fmt' mangled-name='zfs_error_fmt' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='localtime_r' mangled-name='localtime_r' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strftime' mangled-name='strftime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin_snprintf' mangled-name='snprintf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_lookup_int64' mangled-name='nvlist_lookup_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_channel_program_nosync' mangled-name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strsep' mangled-name='strsep' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='hasmntopt' mangled-name='hasmntopt' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_setprop_error' mangled-name='zfs_setprop_error' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvpair_value_uint64' mangled-name='fnvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__asprintf_chk' mangled-name='__asprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_uint64' mangled-name='nvpair_value_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_string' mangled-name='nvpair_value_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_parse_options' mangled-name='zfs_parse_options' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_parse_value' mangled-name='zprop_parse_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_mutex_lock' mangled-name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_mutex_unlock' mangled-name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_mutex_destroy' mangled-name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_mutex_init' mangled-name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_get_bookmarks' mangled-name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_name_valid' mangled-name='zpool_name_valid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strtoul' mangled-name='strtoul' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='getgrnam' mangled-name='getgrnam' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='getpwnam' mangled-name='getpwnam' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strstr' mangled-name='strstr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_exists' mangled-name='lzc_exists' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_boolean' mangled-name='nvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-96'>
- <parameter type-id='type-id-13'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-95'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-2'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_diff.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_show_diffs' mangled-name='zfs_show_diffs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_show_diffs'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-2' name='outfd'/>
- <parameter type-id='type-id-84' name='fromsnap'/>
- <parameter type-id='type-id-84' name='tosnap'/>
- <parameter type-id='type-id-2' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='__builtin_strncpy' mangled-name='strncpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_asprintf' mangled-name='zfs_asprintf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_validate_name' mangled-name='zfs_validate_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='find_shares_object' mangled-name='find_shares_object' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pipe2' mangled-name='pipe2' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_create' mangled-name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_cancel' mangled-name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pthread_join' mangled-name='pthread_join' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fdopen' mangled-name='fdopen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin_fwrite' mangled-name='fwrite' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_import.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-118'>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='libzfs_ns_gen' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='libzfs_desc_active' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='libzfs_action' type-id='b54ce520' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='8544'>
+ <var-decl name='libzfs_desc' type-id='b54ce520' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='16736'>
+ <var-decl name='libzfs_printerr' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='16768'>
+ <var-decl name='libzfs_mnttab_enable' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='16832'>
+ <var-decl name='libzfs_mnttab_cache_lock' type-id='7a6844eb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='17152'>
+ <var-decl name='libzfs_mnttab_cache' type-id='f20fbd51' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='17472'>
+ <var-decl name='libzfs_pool_iter' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='17504'>
+ <var-decl name='libzfs_prop_debug' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='17536'>
+ <var-decl name='libzfs_urire' type-id='aca3bac8' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='18048'>
+ <var-decl name='libzfs_max_nvlist' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='18112'>
+ <var-decl name='libfetch' type-id='eaa32e2f' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='18176'>
+ <var-decl name='libfetch_load_error' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='zpool_handle' size-in-bits='2560' is-struct='yes' visibility='default' id='67002a8a'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pco_refresh_config' type-id='type-id-119' visibility='default'/>
+ <var-decl name='zpool_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pco_pool_active' type-id='type-id-120' visibility='default'/>
+ <var-decl name='zpool_next' type-id='4c81de99' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zpool_name' type-id='d1617432' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2176'>
+ <var-decl name='zpool_state' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2240'>
+ <var-decl name='zpool_config_size' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2304'>
+ <var-decl name='zpool_config' type-id='5ce45b60' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2368'>
+ <var-decl name='zpool_old_config' type-id='5ce45b60' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2432'>
+ <var-decl name='zpool_props' type-id='5ce45b60' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2496'>
+ <var-decl name='zpool_start_block' type-id='804dc465' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='refresh_config_func_t' type-id='type-id-121' id='type-id-122'/>
- <pointer-type-def type-id='type-id-122' size-in-bits='64' id='type-id-119'/>
- <typedef-decl name='pool_active_func_t' type-id='type-id-123' id='type-id-124'/>
- <pointer-type-def type-id='type-id-124' size-in-bits='64' id='type-id-120'/>
- <qualified-type-def type-id='type-id-118' const='yes' id='type-id-125'/>
- <typedef-decl name='pool_config_ops_t' type-id='type-id-125' id='type-id-126'/>
- <var-decl name='libzfs_config_ops' type-id='type-id-126' mangled-name='libzfs_config_ops' visibility='default' elf-symbol-id='libzfs_config_ops'/>
- <enum-decl name='pool_state' id='type-id-127'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='POOL_STATE_ACTIVE' value='0'/>
- <enumerator name='POOL_STATE_EXPORTED' value='1'/>
- <enumerator name='POOL_STATE_DESTROYED' value='2'/>
- <enumerator name='POOL_STATE_SPARE' value='3'/>
- <enumerator name='POOL_STATE_L2CACHE' value='4'/>
- <enumerator name='POOL_STATE_UNINITIALIZED' value='5'/>
- <enumerator name='POOL_STATE_UNAVAIL' value='6'/>
- <enumerator name='POOL_STATE_POTENTIALLY_ACTIVE' value='7'/>
- </enum-decl>
- <typedef-decl name='pool_state_t' type-id='type-id-127' id='type-id-128'/>
- <pointer-type-def type-id='type-id-128' size-in-bits='64' id='type-id-129'/>
- <function-decl name='zpool_in_use' mangled-name='zpool_in_use' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_in_use'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-2' name='fd'/>
- <parameter type-id='type-id-129' name='state'/>
- <parameter type-id='type-id-117' name='namestr'/>
- <parameter type-id='type-id-85' name='inuse'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_clear_label' mangled-name='zpool_clear_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear_label'>
- <parameter type-id='type-id-2' name='fd'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__pread64_alias' mangled-name='pread64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='pwrite64' mangled-name='pwrite64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__fxstat64' mangled-name='__fxstat64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zcmd_write_conf_nvlist' mangled-name='zcmd_write_conf_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-123'>
- <parameter type-id='type-id-13'/>
- <parameter type-id='type-id-84'/>
- <parameter type-id='type-id-7'/>
- <parameter type-id='type-id-85'/>
- <return type-id='type-id-2'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-121'>
- <parameter type-id='type-id-13'/>
- <parameter type-id='type-id-19'/>
- <return type-id='type-id-19'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_iter.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_mounted'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-9' name='allowrecursion'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'>
- <parameter type-id='type-id-76' name='fs_zhp'/>
- <parameter type-id='type-id-84' name='spec_orig'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='arg'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-79' name='callback'/>
- <parameter type-id='type-id-13' name='data'/>
- <parameter type-id='type-id-7' name='min_txg'/>
- <parameter type-id='type-id-7' name='max_txg'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-9' name='simple'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <parameter type-id='type-id-7' name='min_txg'/>
- <parameter type-id='type-id-7' name='max_txg'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='make_bookmark_handle' mangled-name='make_bookmark_handle' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='make_dataset_simple_handle_zc' mangled-name='make_dataset_simple_handle_zc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='make_dataset_handle_zc' mangled-name='make_dataset_handle_zc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_mount.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='__anonymous_struct__' size-in-bits='192' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-130' visibility='default' id='type-id-131'>
+ <typedef-decl name='libzfs_handle_t' type-id='c8a9d9d8' id='95942d0c'/>
+ <typedef-decl name='zpool_handle_t' type-id='67002a8a' id='b1efc708'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='p_prop' type-id='type-id-110' visibility='default'/>
+ <var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='p_name' type-id='type-id-14' visibility='default'/>
+ <var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='p_share_err' type-id='type-id-2' visibility='default'/>
+ <var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='p_unshare_err' type-id='type-id-2' visibility='default'/>
+ <var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='proto_table_t' type-id='type-id-131' id='type-id-130'/>
-
- <array-type-def dimensions='1' type-id='type-id-130' size-in-bits='384' id='type-id-132'>
- <subrange length='2' type-id='type-id-24' id='type-id-59'/>
-
- </array-type-def>
- <var-decl name='proto_table' type-id='type-id-132' visibility='default'/>
- <function-decl name='zpool_disable_datasets' mangled-name='zpool_disable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-9' name='force'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_enable_datasets' mangled-name='zpool_enable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_enable_datasets'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='mntopts'/>
- <parameter type-id='type-id-2' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <pointer-type-def type-id='type-id-76' size-in-bits='64' id='type-id-133'/>
- <function-decl name='zfs_foreach_mountpoint' mangled-name='zfs_foreach_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_foreach_mountpoint'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-133' name='handles'/>
- <parameter type-id='type-id-18' name='num_handles'/>
- <parameter type-id='type-id-79' name='func'/>
- <parameter type-id='type-id-13' name='data'/>
- <parameter type-id='type-id-9' name='parallel'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='get_all_cb' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-134'>
+ <typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
+ <typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
+ <typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
+ <typedef-decl name='diskaddr_t' type-id='9b3ff54f' id='804dc465'/>
+ <typedef-decl name='longlong_t' type-id='1eb56b1e' id='9b3ff54f'/>
+ <typedef-decl name='uu_avl_pool_t' type-id='12a530a8' id='7f84e390'/>
+ <typedef-decl name='uu_avl_t' type-id='4af029d1' id='bb7f0973'/>
+ <typedef-decl name='boolean_t' type-id='08f5ca17' id='c19b74c3'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
+ <typedef-decl name='pthread_mutex_t' type-id='c4794498' id='7a6844eb'/>
+ <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='c4794498'>
+ <data-member access='private'>
+ <var-decl name='__data' type-id='4c734837' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='__size' type-id='36c46961' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
+ </data-member>
+ </union-decl>
+ <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='4c734837'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='cb_handles' type-id='type-id-133' visibility='default'/>
+ <var-decl name='__lock' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='__count' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='cb_alloc' type-id='type-id-18' visibility='default'/>
+ <var-decl name='__owner' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='__nusers' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='cb_used' type-id='type-id-18' visibility='default'/>
+ <var-decl name='__kind' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='__spins' type-id='a2185560' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='176'>
+ <var-decl name='__elision' type-id='a2185560' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='__list' type-id='518fb49c' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='get_all_cb_t' type-id='type-id-134' id='type-id-135'/>
- <pointer-type-def type-id='type-id-135' size-in-bits='64' id='type-id-136'/>
- <function-decl name='libzfs_add_handle' mangled-name='libzfs_add_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_add_handle'>
- <parameter type-id='type-id-136' name='cbp'/>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_unshareall_bytype' mangled-name='zfs_unshareall_bytype' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bytype'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-84' name='proto'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_unshareall_bypath' mangled-name='zfs_unshareall_bypath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bypath'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_unshareall' mangled-name='zfs_unshareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_unshareall_smb' mangled-name='zfs_unshareall_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_smb'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_unshareall_nfs' mangled-name='zfs_unshareall_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_nfs'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_unshare_smb' mangled-name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_smb'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_unshare_nfs' mangled-name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_nfs'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_share_smb' mangled-name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_smb'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_share_nfs' mangled-name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_nfs'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_commit_shares' mangled-name='zfs_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_shares'>
- <parameter type-id='type-id-84' name='proto'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_is_shared_smb' mangled-name='zfs_is_shared_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_smb'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-117' name='where'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_is_shared_nfs' mangled-name='zfs_is_shared_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_nfs'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-117' name='where'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_unshare' mangled-name='zfs_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_unmountall' mangled-name='zfs_unmountall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmountall'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-2' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmount'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-2' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_mount_at' mangled-name='zfs_mount_at' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_at'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='options'/>
- <parameter type-id='type-id-2' name='flags'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='options'/>
- <parameter type-id='type-id-2' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mounted'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-117' name='where'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mounted'>
- <parameter type-id='type-id-16' name='zfs_hdl'/>
- <parameter type-id='type-id-84' name='special'/>
- <parameter type-id='type-id-117' name='where'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_realloc' mangled-name='zfs_realloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='qsort' mangled-name='qsort' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='rmdir' mangled-name='rmdir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='changelist_unshare' mangled-name='changelist_unshare' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='do_unmount' mangled-name='do_unmount' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__lxstat' mangled-name='__lxstat64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__openat_alias' mangled-name='openat64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fdopendir' mangled-name='fdopendir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='readdir64' mangled-name='readdir64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='closedir' mangled-name='closedir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__xstat' mangled-name='__xstat64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='statfs64' mangled-name='statfs64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='do_mount' mangled-name='do_mount' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_pool.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_bootenv'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-86' name='nvlp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <qualified-type-def type-id='type-id-33' const='yes' id='type-id-137'/>
- <pointer-type-def type-id='type-id-137' size-in-bits='64' id='type-id-138'/>
- <function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_bootenv'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-138' name='envmap'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-139'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
- <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
- <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
- <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
- <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
- <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
- <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
- <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
- <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
- </enum-decl>
- <typedef-decl name='zpool_wait_activity_t' type-id='type-id-139' id='type-id-140'/>
- <function-decl name='zpool_wait_status' mangled-name='zpool_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait_status'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-140' name='activity'/>
- <parameter type-id='type-id-85' name='missing'/>
- <parameter type-id='type-id-85' name='waited'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_wait' mangled-name='zpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-140' name='activity'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_obj_to_path_ds' mangled-name='zpool_obj_to_path_ds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path_ds'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-7' name='dsobj'/>
- <parameter type-id='type-id-7' name='obj'/>
- <parameter type-id='type-id-14' name='pathname'/>
- <parameter type-id='type-id-18' name='len'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_obj_to_path' mangled-name='zpool_obj_to_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-7' name='dsobj'/>
- <parameter type-id='type-id-7' name='obj'/>
- <parameter type-id='type-id-14' name='pathname'/>
- <parameter type-id='type-id-18' name='len'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_events_seek' mangled-name='zpool_events_seek' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_seek'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-7' name='eid'/>
- <parameter type-id='type-id-2' name='zevent_fd'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_events_clear' mangled-name='zpool_events_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_clear'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-114' name='count'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_events_next' mangled-name='zpool_events_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_next'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-86' name='nvp'/>
- <parameter type-id='type-id-114' name='dropped'/>
- <parameter type-id='type-id-30' name='flags'/>
- <parameter type-id='type-id-2' name='zevent_fd'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_get_history' mangled-name='zpool_get_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_history'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-86' name='nvhisp'/>
- <parameter type-id='type-id-108' name='off'/>
- <parameter type-id='type-id-85' name='eof'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_log_history' mangled-name='zpool_log_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_log_history'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='message'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_save_arguments' mangled-name='zfs_save_arguments' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_save_arguments'>
- <parameter type-id='type-id-2' name='argc'/>
- <parameter type-id='type-id-117' name='argv'/>
- <parameter type-id='type-id-14' name='string'/>
- <parameter type-id='type-id-2' name='len'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_upgrade' mangled-name='zpool_upgrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_upgrade'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-7' name='new_version'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-86' name='nverrlistp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_name' mangled-name='zpool_vdev_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_name'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-19' name='nv'/>
- <parameter type-id='type-id-2' name='name_flags'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='zpool_sync_one' mangled-name='zpool_sync_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_sync_one'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_reopen_one' mangled-name='zpool_reopen_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reopen_one'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_reguid' mangled-name='zpool_reguid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reguid'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_clear' mangled-name='zpool_vdev_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_clear'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-7' name='guid'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_clear' mangled-name='zpool_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-19' name='rewindnvl'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_indirect_size' mangled-name='zpool_vdev_indirect_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_indirect_size'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-108' name='sizep'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_remove_cancel' mangled-name='zpool_vdev_remove_cancel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_cancel'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_remove' mangled-name='zpool_vdev_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <class-decl name='splitflags' size-in-bits='64' is-struct='yes' visibility='default' id='type-id-141'>
+ <typedef-decl name='__pthread_list_t' type-id='0e01899c' id='518fb49c'/>
+ <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='0e01899c'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='__prev' type-id='4d98cd5a' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='__next' type-id='4d98cd5a' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='avl_tree_t' type-id='b351119f' id='f20fbd51'/>
+ <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='avl_root' type-id='bf311473' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='avl_pad' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
+ <typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
+ <typedef-decl name='regex_t' type-id='19fc9a8c' id='aca3bac8'/>
+ <class-decl name='re_pattern_buffer' size-in-bits='512' is-struct='yes' visibility='default' id='19fc9a8c'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='buffer' type-id='cf536864' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='allocated' type-id='7359adad' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='used' type-id='7359adad' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='syntax' type-id='1b72c3b3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='fastmap' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='translate' type-id='cf536864' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='re_nsub' type-id='b59d7dce' visibility='default'/>
+ </data-member>
<data-member access='public' layout-offset-in-bits='31'>
- <var-decl name='dryrun' type-id='type-id-2' visibility='default'/>
+ <var-decl name='can_be_null' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='30'>
- <var-decl name='import' type-id='type-id-2' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='29'>
+ <var-decl name='regs_allocated' type-id='f0981eeb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='name_flags' type-id='type-id-2' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='28'>
+ <var-decl name='fastmap_accurate' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='27'>
+ <var-decl name='no_sub' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='26'>
+ <var-decl name='not_bol' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='25'>
+ <var-decl name='not_eol' type-id='f0981eeb' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='24'>
+ <var-decl name='newline_anchor' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='splitflags_t' type-id='type-id-141' id='type-id-142'/>
- <function-decl name='zpool_vdev_split' mangled-name='zpool_vdev_split' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_split'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-14' name='newname'/>
- <parameter type-id='type-id-86' name='newroot'/>
- <parameter type-id='type-id-19' name='props'/>
- <parameter type-id='type-id-142' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_detach' mangled-name='zpool_vdev_detach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_detach'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_attach' mangled-name='zpool_vdev_attach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_attach'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='old_disk'/>
- <parameter type-id='type-id-84' name='new_disk'/>
- <parameter type-id='type-id-19' name='nvroot'/>
- <parameter type-id='type-id-2' name='replacing'/>
- <parameter type-id='type-id-9' name='rebuild'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <enum-decl name='vdev_aux' id='type-id-143'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='VDEV_AUX_NONE' value='0'/>
- <enumerator name='VDEV_AUX_OPEN_FAILED' value='1'/>
- <enumerator name='VDEV_AUX_CORRUPT_DATA' value='2'/>
- <enumerator name='VDEV_AUX_NO_REPLICAS' value='3'/>
- <enumerator name='VDEV_AUX_BAD_GUID_SUM' value='4'/>
- <enumerator name='VDEV_AUX_TOO_SMALL' value='5'/>
- <enumerator name='VDEV_AUX_BAD_LABEL' value='6'/>
- <enumerator name='VDEV_AUX_VERSION_NEWER' value='7'/>
- <enumerator name='VDEV_AUX_VERSION_OLDER' value='8'/>
- <enumerator name='VDEV_AUX_UNSUP_FEAT' value='9'/>
- <enumerator name='VDEV_AUX_SPARED' value='10'/>
- <enumerator name='VDEV_AUX_ERR_EXCEEDED' value='11'/>
- <enumerator name='VDEV_AUX_IO_FAILURE' value='12'/>
- <enumerator name='VDEV_AUX_BAD_LOG' value='13'/>
- <enumerator name='VDEV_AUX_EXTERNAL' value='14'/>
- <enumerator name='VDEV_AUX_SPLIT_POOL' value='15'/>
- <enumerator name='VDEV_AUX_BAD_ASHIFT' value='16'/>
- <enumerator name='VDEV_AUX_EXTERNAL_PERSIST' value='17'/>
- <enumerator name='VDEV_AUX_ACTIVE' value='18'/>
- <enumerator name='VDEV_AUX_CHILDREN_OFFLINE' value='19'/>
- <enumerator name='VDEV_AUX_ASHIFT_TOO_BIG' value='20'/>
- </enum-decl>
- <typedef-decl name='vdev_aux_t' type-id='type-id-143' id='type-id-144'/>
- <function-decl name='zpool_vdev_degrade' mangled-name='zpool_vdev_degrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_degrade'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-7' name='guid'/>
- <parameter type-id='type-id-144' name='aux'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_fault' mangled-name='zpool_vdev_fault' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_fault'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-7' name='guid'/>
- <parameter type-id='type-id-144' name='aux'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_offline' mangled-name='zpool_vdev_offline' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_offline'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-9' name='istmp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <enum-decl name='vdev_state' id='type-id-145'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='VDEV_STATE_UNKNOWN' value='0'/>
- <enumerator name='VDEV_STATE_CLOSED' value='1'/>
- <enumerator name='VDEV_STATE_OFFLINE' value='2'/>
- <enumerator name='VDEV_STATE_REMOVED' value='3'/>
- <enumerator name='VDEV_STATE_CANT_OPEN' value='4'/>
- <enumerator name='VDEV_STATE_FAULTED' value='5'/>
- <enumerator name='VDEV_STATE_DEGRADED' value='6'/>
- <enumerator name='VDEV_STATE_HEALTHY' value='7'/>
- </enum-decl>
- <typedef-decl name='vdev_state_t' type-id='type-id-145' id='type-id-146'/>
- <pointer-type-def type-id='type-id-146' size-in-bits='64' id='type-id-147'/>
- <function-decl name='zpool_vdev_online' mangled-name='zpool_vdev_online' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_online'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-2' name='flags'/>
- <parameter type-id='type-id-147' name='newstate'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zpool_get_physpath' mangled-name='zpool_get_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_physpath'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-14' name='physpath'/>
- <parameter type-id='type-id-18' name='phypath_size'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_find_vdev' mangled-name='zpool_find_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-85' name='avail_spare'/>
- <parameter type-id='type-id-85' name='l2cache'/>
- <parameter type-id='type-id-85' name='log'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <function-decl name='zpool_find_vdev_by_physpath' mangled-name='zpool_find_vdev_by_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev_by_physpath'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='ppath'/>
- <parameter type-id='type-id-85' name='avail_spare'/>
- <parameter type-id='type-id-85' name='l2cache'/>
- <parameter type-id='type-id-85' name='log'/>
- <return type-id='type-id-19'/>
- </function-decl>
- <enum-decl name='pool_scan_func' id='type-id-148'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='POOL_SCAN_NONE' value='0'/>
- <enumerator name='POOL_SCAN_SCRUB' value='1'/>
- <enumerator name='POOL_SCAN_RESILVER' value='2'/>
- <enumerator name='POOL_SCAN_FUNCS' value='3'/>
- </enum-decl>
- <typedef-decl name='pool_scan_func_t' type-id='type-id-148' id='type-id-149'/>
- <enum-decl name='pool_scrub_cmd' id='type-id-150'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='POOL_SCRUB_NORMAL' value='0'/>
- <enumerator name='POOL_SCRUB_PAUSE' value='1'/>
- <enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
- </enum-decl>
- <typedef-decl name='pool_scrub_cmd_t' type-id='type-id-150' id='type-id-151'/>
- <function-decl name='zpool_scan' mangled-name='zpool_scan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_scan'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-149' name='func'/>
- <parameter type-id='type-id-151' name='cmd'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <enum-decl name='pool_trim_func' id='type-id-152'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='POOL_TRIM_START' value='0'/>
- <enumerator name='POOL_TRIM_CANCEL' value='1'/>
- <enumerator name='POOL_TRIM_SUSPEND' value='2'/>
- <enumerator name='POOL_TRIM_FUNCS' value='3'/>
- </enum-decl>
- <typedef-decl name='pool_trim_func_t' type-id='type-id-152' id='type-id-153'/>
- <class-decl name='trimflags' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-154'>
+ <typedef-decl name='reg_syntax_t' type-id='7359adad' id='1b72c3b3'/>
+ <typedef-decl name='zfs_iter_f' type-id='5571cde4' id='d8e49ab9'/>
+ <typedef-decl name='zfs_handle_t' type-id='f6ee4445' id='775509eb'/>
+ <class-decl name='zfs_handle' size-in-bits='4928' is-struct='yes' visibility='default' id='f6ee4445'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='fullpool' type-id='type-id-9' visibility='default'/>
+ <var-decl name='zfs_hdl' type-id='b0382bb3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='secure' type-id='type-id-9' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='zpool_hdl' type-id='4c81de99' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zfs_name' type-id='d1617432' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2176'>
+ <var-decl name='zfs_type' type-id='2e45de5d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2208'>
+ <var-decl name='zfs_head_type' type-id='2e45de5d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2240'>
+ <var-decl name='zfs_dmustats' type-id='b2c14f17' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4544'>
+ <var-decl name='zfs_props' type-id='5ce45b60' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4608'>
+ <var-decl name='zfs_user_props' type-id='5ce45b60' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4672'>
+ <var-decl name='zfs_recvd_props' type-id='5ce45b60' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4736'>
+ <var-decl name='zfs_mntcheck' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4800'>
+ <var-decl name='zfs_mntopts' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='4864'>
+ <var-decl name='zfs_props_table' type-id='ae3e8ca6' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zfs_type_t' type-id='40ed39d4' id='2e45de5d'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d4'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_TYPE_FILESYSTEM' value='1'/>
+ <enumerator name='ZFS_TYPE_SNAPSHOT' value='2'/>
+ <enumerator name='ZFS_TYPE_VOLUME' value='4'/>
+ <enumerator name='ZFS_TYPE_POOL' value='8'/>
+ <enumerator name='ZFS_TYPE_BOOKMARK' value='16'/>
+ </enum-decl>
+ <typedef-decl name='dmu_objset_stats_t' type-id='098f0221' id='b2c14f17'/>
+ <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='098f0221'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='dds_num_clones' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='wait' type-id='type-id-9' visibility='default'/>
+ <var-decl name='dds_creation_txg' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='dds_guid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='dds_type' type-id='230f1e16' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='dds_is_snapshot' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='232'>
+ <var-decl name='dds_inconsistent' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='240'>
+ <var-decl name='dds_redacted' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='rate' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='248'>
+ <var-decl name='dds_origin' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='trimflags_t' type-id='type-id-154' id='type-id-155'/>
- <pointer-type-def type-id='type-id-155' size-in-bits='64' id='type-id-156'/>
- <function-decl name='zpool_trim' mangled-name='zpool_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_trim'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-153' name='cmd_type'/>
- <parameter type-id='type-id-19' name='vds'/>
- <parameter type-id='type-id-156' name='trim_flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <enum-decl name='pool_initialize_func' id='type-id-157'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='POOL_INITIALIZE_START' value='0'/>
- <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
- <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
- <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
+ <typedef-decl name='dmu_objset_type_t' type-id='6b1b19f9' id='230f1e16'/>
+ <enum-decl name='dmu_objset_type' id='6b1b19f9'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='DMU_OST_NONE' value='0'/>
+ <enumerator name='DMU_OST_META' value='1'/>
+ <enumerator name='DMU_OST_ZFS' value='2'/>
+ <enumerator name='DMU_OST_ZVOL' value='3'/>
+ <enumerator name='DMU_OST_OTHER' value='4'/>
+ <enumerator name='DMU_OST_ANY' value='5'/>
+ <enumerator name='DMU_OST_NUMTYPES' value='6'/>
</enum-decl>
- <typedef-decl name='pool_initialize_func_t' type-id='type-id-157' id='type-id-158'/>
- <function-decl name='zpool_initialize_wait' mangled-name='zpool_initialize_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize_wait'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-158' name='cmd_type'/>
- <parameter type-id='type-id-19' name='vds'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_initialize' mangled-name='zpool_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-158' name='cmd_type'/>
- <parameter type-id='type-id-19' name='vds'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-19' name='config'/>
- <parameter type-id='type-id-84' name='newname'/>
- <parameter type-id='type-id-19' name='props'/>
- <parameter type-id='type-id-2' name='flags'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
- <parameter type-id='type-id-19' name='config'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-19' name='config'/>
- <parameter type-id='type-id-84' name='newname'/>
- <parameter type-id='type-id-14' name='altroot'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_explain_recover' mangled-name='zpool_explain_recover' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_explain_recover'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-2' name='reason'/>
- <parameter type-id='type-id-19' name='config'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_export_force' mangled-name='zpool_export_force' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export_force'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='log_str'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_export' mangled-name='zpool_export' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-9' name='force'/>
- <parameter type-id='type-id-84' name='log_str'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-19' name='nvroot'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_discard_checkpoint' mangled-name='zpool_discard_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_discard_checkpoint'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_checkpoint' mangled-name='zpool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_checkpoint'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-2'/>
+ <typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
+ <typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
+ <typedef-decl name='zpool_iter_f' type-id='3aebb66f' id='fa476e62'/>
+ <pointer-type-def type-id='0e01899c' size-in-bits='64' id='4d98cd5a'/>
+ <pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
+ <pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
+ <pointer-type-def type-id='cb9628fa' size-in-bits='64' id='5571cde4'/>
+ <pointer-type-def type-id='2bce87e3' size-in-bits='64' id='3aebb66f'/>
+ <pointer-type-def type-id='95942d0c' size-in-bits='64' id='b0382bb3'/>
+ <pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
+ <pointer-type-def type-id='5ce45b60' size-in-bits='64' id='857bb57e'/>
+ <pointer-type-def type-id='b96825af' size-in-bits='64' id='ae3e8ca6'/>
+ <pointer-type-def type-id='002ac4a6' size-in-bits='64' id='cf536864'/>
+ <pointer-type-def type-id='7f84e390' size-in-bits='64' id='de82c773'/>
+ <pointer-type-def type-id='bb7f0973' size-in-bits='64' id='a5c21a38'/>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <pointer-type-def type-id='775509eb' size-in-bits='64' id='9200a744'/>
+ <pointer-type-def type-id='b1efc708' size-in-bits='64' id='4c81de99'/>
+ <function-decl name='zfs_iter_root' mangled-name='zfs_iter_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_root'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_destroy' mangled-name='zpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_destroy'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='log_str'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_iter' mangled-name='zpool_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_iter'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='fa476e62' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_create' mangled-name='zpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_create'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='pool'/>
- <parameter type-id='type-id-19' name='nvroot'/>
- <parameter type-id='type-id-19' name='props'/>
- <parameter type-id='type-id-19' name='fsprops'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_skip_pool' mangled-name='zpool_skip_pool' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_skip_pool'>
+ <parameter type-id='80f4b756' name='poolname'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='zpool_is_draid_spare' mangled-name='zpool_is_draid_spare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_is_draid_spare'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
+ <function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_refresh_stats'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='37e3bd22' name='missing'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_get_state' mangled-name='zpool_get_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_get_features' mangled-name='zpool_get_features' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_features'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_name'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zpool_get_config' mangled-name='zpool_get_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_config'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='857bb57e' name='oldconfig'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='96ee24a5'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='cb9628fa'>
+ <parameter type-id='9200a744'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='2bce87e3'>
+ <parameter type-id='4c81de99'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_crypto.c' language='LANG_C99'>
+ <typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
+ <pointer-type-def type-id='ae3e8ca6' size-in-bits='64' id='d8774064'/>
+ <pointer-type-def type-id='3502e3ff' size-in-bits='64' id='4dd26a40'/>
+ <function-decl name='zfs_crypto_rewrap' mangled-name='zfs_crypto_rewrap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_rewrap'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='5ce45b60' name='raw_props'/>
+ <parameter type-id='c19b74c3' name='inheritkey'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_close'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_crypto_unload_key' mangled-name='zfs_crypto_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_unload_key'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='pool'/>
- <return type-id='type-id-4'/>
+ <function-decl name='zfs_crypto_load_key' mangled-name='zfs_crypto_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_load_key'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='c19b74c3' name='noop'/>
+ <parameter type-id='26a90f95' name='alt_keylocation'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_canfail'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='pool'/>
- <return type-id='type-id-4'/>
+ <function-decl name='zfs_crypto_attempt_load_keys' mangled-name='zfs_crypto_attempt_load_keys' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_attempt_load_keys'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='26a90f95' name='fsname'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_feature'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-14' name='buf'/>
- <parameter type-id='type-id-18' name='len'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_crypto_clone_check' mangled-name='zfs_crypto_clone_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_clone_check'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='9200a744' name='origin_zhp'/>
+ <parameter type-id='26a90f95' name='parent_name'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_expand_proplist' mangled-name='zpool_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_expand_proplist'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-103' name='plp'/>
- <parameter type-id='type-id-9' name='literal'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_crypto_create' mangled-name='zfs_crypto_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_create'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='26a90f95' name='parent_name'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='5ce45b60' name='pool_props'/>
+ <parameter type-id='c19b74c3' name='stdin_available'/>
+ <parameter type-id='d8774064' name='wkeydata_out'/>
+ <parameter type-id='4dd26a40' name='wkeylen_out'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_set_prop' mangled-name='zpool_set_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_prop'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-84' name='propval'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_crypto_get_encryption_root' mangled-name='zfs_crypto_get_encryption_root' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_crypto_get_encryption_root'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='37e3bd22' name='is_encroot'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-159'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
- <enumerator name='ZPOOL_PROP_NAME' value='0'/>
- <enumerator name='ZPOOL_PROP_SIZE' value='1'/>
- <enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
- <enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
- <enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
- <enumerator name='ZPOOL_PROP_GUID' value='5'/>
- <enumerator name='ZPOOL_PROP_VERSION' value='6'/>
- <enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
- <enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
- <enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
- <enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
- <enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
- <enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
- <enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
- <enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
- <enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
- <enumerator name='ZPOOL_PROP_FREE' value='16'/>
- <enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
- <enumerator name='ZPOOL_PROP_READONLY' value='18'/>
- <enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
- <enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
- <enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
- <enumerator name='ZPOOL_PROP_FREEING' value='22'/>
- <enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
- <enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
- <enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
- <enumerator name='ZPOOL_PROP_TNAME' value='26'/>
- <enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
- <enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
- <enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
- <enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
- <enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
- <enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
- <enumerator name='ZPOOL_NUM_PROPS' value='33'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_dataset.c' language='LANG_C99'>
+ <typedef-decl name='zfs_wait_activity_t' type-id='08f5ca1c' id='3024501a'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca1c'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
+ <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
</enum-decl>
- <typedef-decl name='zpool_prop_t' type-id='type-id-159' id='type-id-160'/>
- <function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-160' name='prop'/>
- <parameter type-id='type-id-14' name='buf'/>
- <parameter type-id='type-id-18' name='len'/>
- <parameter type-id='type-id-113' name='srctype'/>
- <parameter type-id='type-id-9' name='literal'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_pool_state_to_name' mangled-name='zpool_pool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_pool_state_to_name'>
- <parameter type-id='type-id-128' name='state'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_state_to_name' mangled-name='zpool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_state_to_name'>
- <parameter type-id='type-id-146' name='state'/>
- <parameter type-id='type-id-144' name='aux'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop_int'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-160' name='prop'/>
- <parameter type-id='type-id-113' name='src'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zpool_props_refresh' mangled-name='zpool_props_refresh' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_props_refresh'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_get_state_str' mangled-name='zpool_get_state_str' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state_str'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-161'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZPOOL_COMPATIBILITY_OK' value='0'/>
- <enumerator name='ZPOOL_COMPATIBILITY_WARNTOKEN' value='1'/>
- <enumerator name='ZPOOL_COMPATIBILITY_BADTOKEN' value='2'/>
- <enumerator name='ZPOOL_COMPATIBILITY_BADFILE' value='3'/>
- <enumerator name='ZPOOL_COMPATIBILITY_NOFILES' value='4'/>
+ <typedef-decl name='zfs_userquota_prop_t' type-id='40ed39d6' id='279fde6a'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d6'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_PROP_USERUSED' value='0'/>
+ <enumerator name='ZFS_PROP_USERQUOTA' value='1'/>
+ <enumerator name='ZFS_PROP_GROUPUSED' value='2'/>
+ <enumerator name='ZFS_PROP_GROUPQUOTA' value='3'/>
+ <enumerator name='ZFS_PROP_USEROBJUSED' value='4'/>
+ <enumerator name='ZFS_PROP_USEROBJQUOTA' value='5'/>
+ <enumerator name='ZFS_PROP_GROUPOBJUSED' value='6'/>
+ <enumerator name='ZFS_PROP_GROUPOBJQUOTA' value='7'/>
+ <enumerator name='ZFS_PROP_PROJECTUSED' value='8'/>
+ <enumerator name='ZFS_PROP_PROJECTQUOTA' value='9'/>
+ <enumerator name='ZFS_PROP_PROJECTOBJUSED' value='10'/>
+ <enumerator name='ZFS_PROP_PROJECTOBJQUOTA' value='11'/>
+ <enumerator name='ZFS_NUM_USERQUOTA_PROPS' value='12'/>
+ </enum-decl>
+ <typedef-decl name='zfs_userspace_cb_t' type-id='ca64ff60' id='16c5f410'/>
+ <typedef-decl name='uid_t' type-id='cc5fcceb' id='354978ed'/>
+ <typedef-decl name='__uid_t' type-id='f0981eeb' id='cc5fcceb'/>
+ <typedef-decl name='zprop_list_t' type-id='bd9b4291' id='bdb8ac4f'/>
+ <class-decl name='zprop_list' size-in-bits='448' is-struct='yes' visibility='default' id='bd9b4291'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='pl_prop' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='pl_user_prop' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='pl_next' type-id='9f1a1109' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='pl_all' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='pl_width' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='pl_recvd_width' type-id='b59d7dce' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='pl_fixed' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='renameflags_t' type-id='7aee5792' id='067170c2'/>
+ <class-decl name='renameflags' size-in-bits='32' is-struct='yes' visibility='default' id='7aee5792'>
+ <data-member access='public' layout-offset-in-bits='31'>
+ <var-decl name='recursive' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='30'>
+ <var-decl name='nounmount' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='29'>
+ <var-decl name='forceunmount' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zfs_prop_t' type-id='3fed383f' id='58603c44'/>
+ <enum-decl name='__anonymous_enum__2' is-anonymous='yes' id='3fed383f'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPROP_CONT' value='-2'/>
+ <enumerator name='ZPROP_INVAL' value='-1'/>
+ <enumerator name='ZFS_PROP_TYPE' value='0'/>
+ <enumerator name='ZFS_PROP_CREATION' value='1'/>
+ <enumerator name='ZFS_PROP_USED' value='2'/>
+ <enumerator name='ZFS_PROP_AVAILABLE' value='3'/>
+ <enumerator name='ZFS_PROP_REFERENCED' value='4'/>
+ <enumerator name='ZFS_PROP_COMPRESSRATIO' value='5'/>
+ <enumerator name='ZFS_PROP_MOUNTED' value='6'/>
+ <enumerator name='ZFS_PROP_ORIGIN' value='7'/>
+ <enumerator name='ZFS_PROP_QUOTA' value='8'/>
+ <enumerator name='ZFS_PROP_RESERVATION' value='9'/>
+ <enumerator name='ZFS_PROP_VOLSIZE' value='10'/>
+ <enumerator name='ZFS_PROP_VOLBLOCKSIZE' value='11'/>
+ <enumerator name='ZFS_PROP_RECORDSIZE' value='12'/>
+ <enumerator name='ZFS_PROP_MOUNTPOINT' value='13'/>
+ <enumerator name='ZFS_PROP_SHARENFS' value='14'/>
+ <enumerator name='ZFS_PROP_CHECKSUM' value='15'/>
+ <enumerator name='ZFS_PROP_COMPRESSION' value='16'/>
+ <enumerator name='ZFS_PROP_ATIME' value='17'/>
+ <enumerator name='ZFS_PROP_DEVICES' value='18'/>
+ <enumerator name='ZFS_PROP_EXEC' value='19'/>
+ <enumerator name='ZFS_PROP_SETUID' value='20'/>
+ <enumerator name='ZFS_PROP_READONLY' value='21'/>
+ <enumerator name='ZFS_PROP_ZONED' value='22'/>
+ <enumerator name='ZFS_PROP_SNAPDIR' value='23'/>
+ <enumerator name='ZFS_PROP_ACLMODE' value='24'/>
+ <enumerator name='ZFS_PROP_ACLINHERIT' value='25'/>
+ <enumerator name='ZFS_PROP_CREATETXG' value='26'/>
+ <enumerator name='ZFS_PROP_NAME' value='27'/>
+ <enumerator name='ZFS_PROP_CANMOUNT' value='28'/>
+ <enumerator name='ZFS_PROP_ISCSIOPTIONS' value='29'/>
+ <enumerator name='ZFS_PROP_XATTR' value='30'/>
+ <enumerator name='ZFS_PROP_NUMCLONES' value='31'/>
+ <enumerator name='ZFS_PROP_COPIES' value='32'/>
+ <enumerator name='ZFS_PROP_VERSION' value='33'/>
+ <enumerator name='ZFS_PROP_UTF8ONLY' value='34'/>
+ <enumerator name='ZFS_PROP_NORMALIZE' value='35'/>
+ <enumerator name='ZFS_PROP_CASE' value='36'/>
+ <enumerator name='ZFS_PROP_VSCAN' value='37'/>
+ <enumerator name='ZFS_PROP_NBMAND' value='38'/>
+ <enumerator name='ZFS_PROP_SHARESMB' value='39'/>
+ <enumerator name='ZFS_PROP_REFQUOTA' value='40'/>
+ <enumerator name='ZFS_PROP_REFRESERVATION' value='41'/>
+ <enumerator name='ZFS_PROP_GUID' value='42'/>
+ <enumerator name='ZFS_PROP_PRIMARYCACHE' value='43'/>
+ <enumerator name='ZFS_PROP_SECONDARYCACHE' value='44'/>
+ <enumerator name='ZFS_PROP_USEDSNAP' value='45'/>
+ <enumerator name='ZFS_PROP_USEDDS' value='46'/>
+ <enumerator name='ZFS_PROP_USEDCHILD' value='47'/>
+ <enumerator name='ZFS_PROP_USEDREFRESERV' value='48'/>
+ <enumerator name='ZFS_PROP_USERACCOUNTING' value='49'/>
+ <enumerator name='ZFS_PROP_STMF_SHAREINFO' value='50'/>
+ <enumerator name='ZFS_PROP_DEFER_DESTROY' value='51'/>
+ <enumerator name='ZFS_PROP_USERREFS' value='52'/>
+ <enumerator name='ZFS_PROP_LOGBIAS' value='53'/>
+ <enumerator name='ZFS_PROP_UNIQUE' value='54'/>
+ <enumerator name='ZFS_PROP_OBJSETID' value='55'/>
+ <enumerator name='ZFS_PROP_DEDUP' value='56'/>
+ <enumerator name='ZFS_PROP_MLSLABEL' value='57'/>
+ <enumerator name='ZFS_PROP_SYNC' value='58'/>
+ <enumerator name='ZFS_PROP_DNODESIZE' value='59'/>
+ <enumerator name='ZFS_PROP_REFRATIO' value='60'/>
+ <enumerator name='ZFS_PROP_WRITTEN' value='61'/>
+ <enumerator name='ZFS_PROP_CLONES' value='62'/>
+ <enumerator name='ZFS_PROP_LOGICALUSED' value='63'/>
+ <enumerator name='ZFS_PROP_LOGICALREFERENCED' value='64'/>
+ <enumerator name='ZFS_PROP_INCONSISTENT' value='65'/>
+ <enumerator name='ZFS_PROP_VOLMODE' value='66'/>
+ <enumerator name='ZFS_PROP_FILESYSTEM_LIMIT' value='67'/>
+ <enumerator name='ZFS_PROP_SNAPSHOT_LIMIT' value='68'/>
+ <enumerator name='ZFS_PROP_FILESYSTEM_COUNT' value='69'/>
+ <enumerator name='ZFS_PROP_SNAPSHOT_COUNT' value='70'/>
+ <enumerator name='ZFS_PROP_SNAPDEV' value='71'/>
+ <enumerator name='ZFS_PROP_ACLTYPE' value='72'/>
+ <enumerator name='ZFS_PROP_SELINUX_CONTEXT' value='73'/>
+ <enumerator name='ZFS_PROP_SELINUX_FSCONTEXT' value='74'/>
+ <enumerator name='ZFS_PROP_SELINUX_DEFCONTEXT' value='75'/>
+ <enumerator name='ZFS_PROP_SELINUX_ROOTCONTEXT' value='76'/>
+ <enumerator name='ZFS_PROP_RELATIME' value='77'/>
+ <enumerator name='ZFS_PROP_REDUNDANT_METADATA' value='78'/>
+ <enumerator name='ZFS_PROP_OVERLAY' value='79'/>
+ <enumerator name='ZFS_PROP_PREV_SNAP' value='80'/>
+ <enumerator name='ZFS_PROP_RECEIVE_RESUME_TOKEN' value='81'/>
+ <enumerator name='ZFS_PROP_ENCRYPTION' value='82'/>
+ <enumerator name='ZFS_PROP_KEYLOCATION' value='83'/>
+ <enumerator name='ZFS_PROP_KEYFORMAT' value='84'/>
+ <enumerator name='ZFS_PROP_PBKDF2_SALT' value='85'/>
+ <enumerator name='ZFS_PROP_PBKDF2_ITERS' value='86'/>
+ <enumerator name='ZFS_PROP_ENCRYPTION_ROOT' value='87'/>
+ <enumerator name='ZFS_PROP_KEY_GUID' value='88'/>
+ <enumerator name='ZFS_PROP_KEYSTATUS' value='89'/>
+ <enumerator name='ZFS_PROP_REMAPTXG' value='90'/>
+ <enumerator name='ZFS_PROP_SPECIAL_SMALL_BLOCKS' value='91'/>
+ <enumerator name='ZFS_PROP_IVSET_GUID' value='92'/>
+ <enumerator name='ZFS_PROP_REDACTED' value='93'/>
+ <enumerator name='ZFS_PROP_REDACT_SNAPS' value='94'/>
+ <enumerator name='ZFS_NUM_PROPS' value='95'/>
+ </enum-decl>
+ <typedef-decl name='zprop_source_t' type-id='3eed36ac' id='a2256d42'/>
+ <enum-decl name='__anonymous_enum__3' is-anonymous='yes' id='3eed36ac'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPROP_SRC_NONE' value='1'/>
+ <enumerator name='ZPROP_SRC_DEFAULT' value='2'/>
+ <enumerator name='ZPROP_SRC_TEMPORARY' value='4'/>
+ <enumerator name='ZPROP_SRC_LOCAL' value='8'/>
+ <enumerator name='ZPROP_SRC_INHERITED' value='16'/>
+ <enumerator name='ZPROP_SRC_RECEIVED' value='32'/>
</enum-decl>
- <typedef-decl name='zpool_compat_status_t' type-id='type-id-161' id='type-id-162'/>
- <function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_load_compat'>
- <parameter type-id='type-id-84' name='compat'/>
- <parameter type-id='type-id-85' name='features'/>
- <parameter type-id='type-id-14' name='report'/>
- <parameter type-id='type-id-18' name='rlen'/>
- <return type-id='type-id-162'/>
- </function-decl>
- <function-decl name='lzc_get_bootenv' mangled-name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_standard_error_fmt' mangled-name='zpool_standard_error_fmt' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_set_bootenv' mangled-name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_wait' mangled-name='lzc_wait' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_standard_error' mangled-name='zpool_standard_error' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='memcmp' mangled-name='memcmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__realpath_alias' mangled-name='realpath' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strncasecmp' mangled-name='strncasecmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_sync' mangled-name='lzc_sync' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_reopen' mangled-name='lzc_reopen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_relabel_disk' mangled-name='zpool_relabel_disk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strtoull' mangled-name='strtoull' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_wait_tag' mangled-name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_trim' mangled-name='lzc_trim' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fnvpair_value_int64' mangled-name='fnvpair_value_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzc_initialize' mangled-name='lzc_initialize' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
+ <qualified-type-def type-id='775509eb' const='yes' id='5eadf2db'/>
+ <pointer-type-def type-id='5eadf2db' size-in-bits='64' id='fcd57163'/>
+ <pointer-type-def type-id='7e291ce6' size-in-bits='64' id='ca64ff60'/>
+ <pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
+ <pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
+ <pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
+ <pointer-type-def type-id='bd9b4291' size-in-bits='64' id='9f1a1109'/>
+ <pointer-type-def type-id='bdb8ac4f' size-in-bits='64' id='3a9b2288'/>
+ <pointer-type-def type-id='3a9b2288' size-in-bits='64' id='e4378506'/>
+ <pointer-type-def type-id='a2256d42' size-in-bits='64' id='debc6aa3'/>
+ <function-decl name='zfs_wait_status' mangled-name='zfs_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_wait_status'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='3024501a' name='activity'/>
+ <parameter type-id='37e3bd22' name='missing'/>
+ <parameter type-id='37e3bd22' name='waited'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zvol_volsize_to_reservation' mangled-name='zvol_volsize_to_reservation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zvol_volsize_to_reservation'>
+ <parameter type-id='4c81de99' name='zph'/>
+ <parameter type-id='9c313c2d' name='volsize'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint_discard' mangled-name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_holds' mangled-name='zfs_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_holds'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='857bb57e' name='nvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint' mangled-name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_set_fsacl' mangled-name='zfs_set_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_set_fsacl'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='c19b74c3' name='un'/>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_fsacl' mangled-name='zfs_get_fsacl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_fsacl'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='857bb57e' name='nvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_refresh_stats' mangled-name='zpool_refresh_stats' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_release' mangled-name='zfs_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_release'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='tag'/>
+ <parameter type-id='c19b74c3' name='recursive'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold_nvl'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='95e97e5e' name='cleanup_fd'/>
+ <parameter type-id='5ce45b60' name='holds'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_hold' mangled-name='zfs_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_hold'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='tag'/>
+ <parameter type-id='c19b74c3' name='recursive'/>
+ <parameter type-id='95e97e5e' name='cleanup_fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_userspace' mangled-name='zfs_userspace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_userspace'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='279fde6a' name='type'/>
+ <parameter type-id='16c5f410' name='func'/>
+ <parameter type-id='eaa32e2f' name='arg'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_smb_acl_rename' mangled-name='zfs_smb_acl_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_rename'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='26a90f95' name='dataset'/>
+ <parameter type-id='26a90f95' name='path'/>
+ <parameter type-id='26a90f95' name='oldname'/>
+ <parameter type-id='26a90f95' name='newname'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_smb_acl_purge' mangled-name='zfs_smb_acl_purge' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_purge'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='26a90f95' name='dataset'/>
+ <parameter type-id='26a90f95' name='path'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_smb_acl_remove' mangled-name='zfs_smb_acl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_remove'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='26a90f95' name='dataset'/>
+ <parameter type-id='26a90f95' name='path'/>
+ <parameter type-id='26a90f95' name='resource'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_smb_acl_add' mangled-name='zfs_smb_acl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_smb_acl_add'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='26a90f95' name='dataset'/>
+ <parameter type-id='26a90f95' name='path'/>
+ <parameter type-id='26a90f95' name='resource'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prune_proplist' mangled-name='zfs_prune_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prune_proplist'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='ae3e8ca6' name='props'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_expand_proplist' mangled-name='zfs_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_expand_proplist'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='e4378506' name='plp'/>
+ <parameter type-id='c19b74c3' name='received'/>
+ <parameter type-id='c19b74c3' name='literal'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_user_props' mangled-name='zfs_get_user_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_user_props'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_recvd_props'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_all_props' mangled-name='zfs_get_all_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_all_props'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='strtok_r' mangled-name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_rename' mangled-name='zfs_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rename'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='target'/>
+ <parameter type-id='067170c2' name='flags'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='mmap' mangled-name='mmap64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_rollback' mangled-name='zfs_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_rollback'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='9200a744' name='snap'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='munmap' mangled-name='munmap' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_snapshot' mangled-name='zfs_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='c19b74c3' name='recursive'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_snapshot_nvl' mangled-name='zfs_snapshot_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_snapshot_nvl'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='5ce45b60' name='snaps'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fnvlist_add_int64' mangled-name='fnvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_promote' mangled-name='zfs_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_promote'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_sendrecv.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='recvflags' size-in-bits='416' is-struct='yes' visibility='default' id='type-id-163'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='verbose' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='isprefix' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='istail' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='dryrun' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='force' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='canmountoff' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='resumable' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='byteswap' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='nomount' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='holds' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='skipholds' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='domount' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='forceunmount' type-id='type-id-9' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='recvflags_t' type-id='type-id-163' id='type-id-164'/>
- <pointer-type-def type-id='type-id-164' size-in-bits='64' id='type-id-165'/>
- <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-166'/>
- <function-decl name='zfs_receive' mangled-name='zfs_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_receive'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='tosnap'/>
- <parameter type-id='type-id-19' name='props'/>
- <parameter type-id='type-id-165' name='flags'/>
- <parameter type-id='type-id-2' name='infd'/>
- <parameter type-id='type-id-166' name='stream_avl'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <class-decl name='sendflags' size-in-bits='544' is-struct='yes' visibility='default' id='type-id-167'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='verbosity' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='replicate' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='skipmissing' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='doall' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='fromorigin' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='pad' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='props' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='dryrun' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='parsable' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='progress' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='largeblock' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='embed_data' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='compress' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='416'>
- <var-decl name='raw' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='backup' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='holds' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='saved' type-id='type-id-9' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='sendflags_t' type-id='type-id-167' id='type-id-168'/>
- <pointer-type-def type-id='type-id-168' size-in-bits='64' id='type-id-169'/>
- <function-decl name='zfs_send_one' mangled-name='zfs_send_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_one'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='from'/>
- <parameter type-id='type-id-2' name='fd'/>
- <parameter type-id='type-id-169' name='flags'/>
- <parameter type-id='type-id-84' name='redactbook'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <typedef-decl name='snapfilter_cb_t' type-id='type-id-170' id='type-id-171'/>
- <pointer-type-def type-id='type-id-171' size-in-bits='64' id='type-id-172'/>
- <function-decl name='zfs_send' mangled-name='zfs_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='fromsnap'/>
- <parameter type-id='type-id-84' name='tosnap'/>
- <parameter type-id='type-id-169' name='flags'/>
- <parameter type-id='type-id-2' name='outfd'/>
- <parameter type-id='type-id-172' name='filter_func'/>
- <parameter type-id='type-id-13' name='cb_arg'/>
- <parameter type-id='type-id-86' name='debugnvp'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_clone' mangled-name='zfs_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_clone'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='target'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_send_saved' mangled-name='zfs_send_saved' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_saved'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-169' name='flags'/>
- <parameter type-id='type-id-2' name='outfd'/>
- <parameter type-id='type-id-84' name='resume_token'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_destroy_snaps_nvl' mangled-name='zfs_destroy_snaps_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps_nvl'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='5ce45b60' name='snaps'/>
+ <parameter type-id='c19b74c3' name='defer'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_send_resume' mangled-name='zfs_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-169' name='flags'/>
- <parameter type-id='type-id-2' name='outfd'/>
- <parameter type-id='type-id-84' name='resume_token'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_destroy_snaps' mangled-name='zfs_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy_snaps'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='26a90f95' name='snapname'/>
+ <parameter type-id='c19b74c3' name='defer'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_send_resume_token_to_nvlist' mangled-name='zfs_send_resume_token_to_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume_token_to_nvlist'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='token'/>
- <return type-id='type-id-19'/>
+ <function-decl name='zfs_destroy' mangled-name='zfs_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_destroy'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='c19b74c3' name='defer'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_send_progress' mangled-name='zfs_send_progress' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_progress'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-2' name='fd'/>
- <parameter type-id='type-id-108' name='bytes_written'/>
- <parameter type-id='type-id-108' name='blocks_visited'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_create' mangled-name='zfs_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_set_pipe_max' mangled-name='libzfs_set_pipe_max' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_create_ancestors' mangled-name='zfs_create_ancestors' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_create_ancestors'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='perror' mangled-name='perror' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_parent_name' mangled-name='zfs_parent_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parent_name'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_underlying_type' mangled-name='zfs_get_underlying_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_type'>
+ <parameter type-id='fcd57163' name='zhp'/>
+ <return type-id='2e45de5d'/>
</function-decl>
- <function-decl name='nvlist_lookup_boolean' mangled-name='nvlist_lookup_boolean' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_type' mangled-name='zfs_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_type'>
+ <parameter type-id='fcd57163' name='zhp'/>
+ <return type-id='2e45de5d'/>
</function-decl>
- <function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_pool_name' mangled-name='zfs_get_pool_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_name'>
+ <parameter type-id='fcd57163' name='zhp'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='__builtin___strcat_chk' mangled-name='__strcat_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_name' mangled-name='zfs_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_name'>
+ <parameter type-id='fcd57163' name='zhp'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='fnvlist_merge' mangled-name='fnvlist_merge' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_written' mangled-name='zfs_prop_get_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='26a90f95' name='propbuf'/>
+ <parameter type-id='95e97e5e' name='proplen'/>
+ <parameter type-id='c19b74c3' name='literal'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='create_parents' mangled-name='create_parents' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_written_int' mangled-name='zfs_prop_get_written_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_written_int'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='5d6479ae' name='propvalue'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='nvpair_value_int32' mangled-name='nvpair_value_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_userquota' mangled-name='zfs_prop_get_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='26a90f95' name='propbuf'/>
+ <parameter type-id='95e97e5e' name='proplen'/>
+ <parameter type-id='c19b74c3' name='literal'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='time' mangled-name='time' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_userquota_int' mangled-name='zfs_prop_get_userquota_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_userquota_int'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='5d6479ae' name='propvalue'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fnvlist_add_nvpair' mangled-name='fnvlist_add_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_numeric' mangled-name='zfs_prop_get_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_numeric'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='58603c44' name='prop'/>
+ <parameter type-id='5d6479ae' name='value'/>
+ <parameter type-id='debc6aa3' name='src'/>
+ <parameter type-id='26a90f95' name='statbuf'/>
+ <parameter type-id='b59d7dce' name='statlen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_int' mangled-name='zfs_prop_get_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_int'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='58603c44' name='prop'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='lzc_receive_with_cmdprops' mangled-name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get' mangled-name='zfs_prop_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='58603c44' name='prop'/>
+ <parameter type-id='26a90f95' name='propbuf'/>
+ <parameter type-id='b59d7dce' name='proplen'/>
+ <parameter type-id='debc6aa3' name='src'/>
+ <parameter type-id='26a90f95' name='statbuf'/>
+ <parameter type-id='b59d7dce' name='statlen'/>
+ <parameter type-id='c19b74c3' name='literal'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='__builtin___sprintf_chk' mangled-name='__sprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_get_clones_nvl' mangled-name='zfs_get_clones_nvl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_clones_nvl'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='__builtin_puts' mangled-name='puts' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_get_recvd' mangled-name='zfs_prop_get_recvd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_recvd'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='26a90f95' name='propbuf'/>
+ <parameter type-id='b59d7dce' name='proplen'/>
+ <parameter type-id='c19b74c3' name='literal'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint64_array' mangled-name='fnvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_inherit' mangled-name='zfs_prop_inherit' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inherit'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='c19b74c3' name='received'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_set_list' mangled-name='zfs_prop_set_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set_list'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_send_redacted' mangled-name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_prop_set' mangled-name='zfs_prop_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_set'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='80f4b756' name='propval'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_hold_nvl' mangled-name='zfs_hold_nvl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_valid_proplist' mangled-name='zfs_valid_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_valid_proplist'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <parameter type-id='5ce45b60' name='nvl'/>
+ <parameter type-id='9c313c2d' name='zoned'/>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='4c81de99' name='zpool_hdl'/>
+ <parameter type-id='c19b74c3' name='key_params_ok'/>
+ <parameter type-id='80f4b756' name='errbuf'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_spa_version' mangled-name='zfs_spa_version' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='7292109c' name='spa_version'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fnvlist_size' mangled-name='fnvlist_size' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='libzfs_mnttab_remove' mangled-name='libzfs_mnttab_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_remove'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='write' mangled-name='write' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='libzfs_mnttab_add' mangled-name='libzfs_mnttab_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_add'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='special'/>
+ <parameter type-id='80f4b756' name='mountp'/>
+ <parameter type-id='80f4b756' name='mntopts'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='libzfs_mnttab_cache' mangled-name='libzfs_mnttab_cache' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_cache'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='c19b74c3' name='enable'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='strndup' mangled-name='strndup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_fini'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_send_resume_redacted' mangled-name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_init'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_close' mangled-name='zfs_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_close'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_send_space_resume_redacted' mangled-name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_open' mangled-name='zfs_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_open'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='95e97e5e' name='types'/>
+ <return type-id='9200a744'/>
</function-decl>
- <function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_bookmark_exists' mangled-name='zfs_bookmark_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_bookmark_exists'>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='uncompress' mangled-name='uncompress' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_handle_dup' mangled-name='zfs_handle_dup' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_handle_dup'>
+ <parameter type-id='9200a744' name='zhp_orig'/>
+ <return type-id='9200a744'/>
</function-decl>
- <function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_refresh_properties' mangled-name='zfs_refresh_properties' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_refresh_properties'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_send_space' mangled-name='lzc_send_space' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_free_handles'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='sleep' mangled-name='sleep' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_name_valid' mangled-name='zfs_name_valid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_valid'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='localtime' mangled-name='localtime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_type_to_name' mangled-name='zfs_type_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_type_to_name'>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='zfs_get_recvd_props' mangled-name='zfs_get_recvd_props' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='libzfs_mnttab_find' mangled-name='libzfs_mnttab_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_mnttab_find'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='9d424d31' name='entry'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_rename' mangled-name='lzc_rename' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='getprop_uint64' mangled-name='getprop_uint64' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getprop_uint64'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='58603c44' name='prop'/>
+ <parameter type-id='9b23c9ad' name='source'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-170'>
- <parameter type-id='type-id-76'/>
- <parameter type-id='type-id-13'/>
- <return type-id='type-id-9'/>
+ <function-decl name='zfs_dataset_exists' mangled-name='zfs_dataset_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_exists'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='2e45de5d' name='types'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='7e291ce6'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='354978ed'/>
+ <parameter type-id='9c313c2d'/>
+ <return type-id='95e97e5e'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_status.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-173'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_CACHE' value='0'/>
- <enumerator name='ZPOOL_STATUS_MISSING_DEV_R' value='1'/>
- <enumerator name='ZPOOL_STATUS_MISSING_DEV_NR' value='2'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_R' value='3'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_NR' value='4'/>
- <enumerator name='ZPOOL_STATUS_BAD_GUID_SUM' value='5'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_POOL' value='6'/>
- <enumerator name='ZPOOL_STATUS_CORRUPT_DATA' value='7'/>
- <enumerator name='ZPOOL_STATUS_FAILING_DEV' value='8'/>
- <enumerator name='ZPOOL_STATUS_VERSION_NEWER' value='9'/>
- <enumerator name='ZPOOL_STATUS_HOSTID_MISMATCH' value='10'/>
- <enumerator name='ZPOOL_STATUS_HOSTID_ACTIVE' value='11'/>
- <enumerator name='ZPOOL_STATUS_HOSTID_REQUIRED' value='12'/>
- <enumerator name='ZPOOL_STATUS_IO_FAILURE_WAIT' value='13'/>
- <enumerator name='ZPOOL_STATUS_IO_FAILURE_CONTINUE' value='14'/>
- <enumerator name='ZPOOL_STATUS_IO_FAILURE_MMP' value='15'/>
- <enumerator name='ZPOOL_STATUS_BAD_LOG' value='16'/>
- <enumerator name='ZPOOL_STATUS_ERRATA' value='17'/>
- <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_READ' value='18'/>
- <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_WRITE' value='19'/>
- <enumerator name='ZPOOL_STATUS_FAULTED_DEV_R' value='20'/>
- <enumerator name='ZPOOL_STATUS_FAULTED_DEV_NR' value='21'/>
- <enumerator name='ZPOOL_STATUS_VERSION_OLDER' value='22'/>
- <enumerator name='ZPOOL_STATUS_FEAT_DISABLED' value='23'/>
- <enumerator name='ZPOOL_STATUS_RESILVERING' value='24'/>
- <enumerator name='ZPOOL_STATUS_OFFLINE_DEV' value='25'/>
- <enumerator name='ZPOOL_STATUS_REMOVED_DEV' value='26'/>
- <enumerator name='ZPOOL_STATUS_REBUILDING' value='27'/>
- <enumerator name='ZPOOL_STATUS_REBUILD_SCRUB' value='28'/>
- <enumerator name='ZPOOL_STATUS_NON_NATIVE_ASHIFT' value='29'/>
- <enumerator name='ZPOOL_STATUS_COMPATIBILITY_ERR' value='30'/>
- <enumerator name='ZPOOL_STATUS_INCOMPATIBLE_FEAT' value='31'/>
- <enumerator name='ZPOOL_STATUS_OK' value='32'/>
- </enum-decl>
- <typedef-decl name='zpool_status_t' type-id='type-id-173' id='type-id-174'/>
- <enum-decl name='zpool_errata' id='type-id-175'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZPOOL_ERRATA_NONE' value='0'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_2094_SCRUB' value='1'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY' value='2'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_6845_ENCRYPTION' value='3'/>
- <enumerator name='ZPOOL_ERRATA_ZOL_8308_ENCRYPTION' value='4'/>
+ <abi-instr version='1.0' address-size='64' path='libzfs_diff.c' language='LANG_C99'>
+ <function-decl name='zfs_show_diffs' mangled-name='zfs_show_diffs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_show_diffs'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='95e97e5e' name='outfd'/>
+ <parameter type-id='80f4b756' name='fromsnap'/>
+ <parameter type-id='80f4b756' name='tosnap'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_import.c' language='LANG_C99'>
+ <typedef-decl name='pool_config_ops_t' type-id='1a21babe' id='b1e62775'/>
+ <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='8b092c69'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='pco_refresh_config' type-id='e7c00489' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='pco_pool_active' type-id='9eadf5e0' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='refresh_config_func_t' type-id='29f040d2' id='b7c58eaa'/>
+ <typedef-decl name='pool_active_func_t' type-id='baa42fef' id='de5d1d8f'/>
+ <typedef-decl name='pool_state_t' type-id='4871ac24' id='084a08a3'/>
+ <enum-decl name='pool_state' id='4871ac24'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='POOL_STATE_ACTIVE' value='0'/>
+ <enumerator name='POOL_STATE_EXPORTED' value='1'/>
+ <enumerator name='POOL_STATE_DESTROYED' value='2'/>
+ <enumerator name='POOL_STATE_SPARE' value='3'/>
+ <enumerator name='POOL_STATE_L2CACHE' value='4'/>
+ <enumerator name='POOL_STATE_UNINITIALIZED' value='5'/>
+ <enumerator name='POOL_STATE_UNAVAIL' value='6'/>
+ <enumerator name='POOL_STATE_POTENTIALLY_ACTIVE' value='7'/>
</enum-decl>
- <typedef-decl name='zpool_errata_t' type-id='type-id-175' id='type-id-176'/>
- <pointer-type-def type-id='type-id-176' size-in-bits='64' id='type-id-177'/>
- <function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
- <parameter type-id='type-id-19' name='config'/>
- <parameter type-id='type-id-117' name='msgid'/>
- <parameter type-id='type-id-177' name='errata'/>
- <return type-id='type-id-174'/>
+ <qualified-type-def type-id='8b092c69' const='yes' id='1a21babe'/>
+ <pointer-type-def type-id='de5d1d8f' size-in-bits='64' id='9eadf5e0'/>
+ <pointer-type-def type-id='084a08a3' size-in-bits='64' id='b9ea57b8'/>
+ <pointer-type-def type-id='b7c58eaa' size-in-bits='64' id='e7c00489'/>
+ <var-decl name='libzfs_config_ops' type-id='b1e62775' mangled-name='libzfs_config_ops' visibility='default' elf-symbol-id='libzfs_config_ops'/>
+ <function-decl name='zpool_in_use' mangled-name='zpool_in_use' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_in_use'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='b9ea57b8' name='state'/>
+ <parameter type-id='9b23c9ad' name='namestr'/>
+ <parameter type-id='37e3bd22' name='inuse'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_status'>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-117' name='msgid'/>
- <parameter type-id='type-id-177' name='errata'/>
- <return type-id='type-id-174'/>
+ <function-decl name='zpool_clear_label' mangled-name='zpool_clear_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear_label'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='baa42fef'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9c313c2d'/>
+ <parameter type-id='37e3bd22'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='29f040d2'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <return type-id='5ce45b60'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_iter.c' language='LANG_C99'>
+ <function-decl name='zfs_iter_mounted' mangled-name='zfs_iter_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_mounted'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_iter_dependents' mangled-name='zfs_iter_dependents' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_dependents'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='c19b74c3' name='allowrecursion'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libzfs_util.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
- <parameter type-id='type-id-14' name='color'/>
- <parameter type-id='type-id-14' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_iter_children' mangled-name='zfs_iter_children' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_children'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_iter_snapspec' mangled-name='zfs_iter_snapspec' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapspec'>
+ <parameter type-id='9200a744' name='fs_zhp'/>
+ <parameter type-id='80f4b756' name='spec_orig'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='arg'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_version_userland' mangled-name='zfs_version_userland' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_userland'>
- <parameter type-id='type-id-14' name='version'/>
- <parameter type-id='type-id-2' name='len'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_iter_snapshots_sorted' mangled-name='zfs_iter_snapshots_sorted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots_sorted'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='d8e49ab9' name='callback'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <parameter type-id='9c313c2d' name='min_txg'/>
+ <parameter type-id='9c313c2d' name='max_txg'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <pointer-type-def type-id='type-id-178' size-in-bits='64' id='type-id-179'/>
- <typedef-decl name='zprop_func' type-id='type-id-179' id='type-id-180'/>
- <function-decl name='zprop_iter' mangled-name='zprop_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter'>
- <parameter type-id='type-id-180' name='func'/>
- <parameter type-id='type-id-13' name='cb'/>
- <parameter type-id='type-id-9' name='show_all'/>
- <parameter type-id='type-id-9' name='ordered'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_iter_bookmarks' mangled-name='zfs_iter_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_bookmarks'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zprop_free_list' mangled-name='zprop_free_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_free_list'>
- <parameter type-id='type-id-102' name='pl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_iter_snapshots' mangled-name='zfs_iter_snapshots' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_snapshots'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='c19b74c3' name='simple'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <parameter type-id='9c313c2d' name='min_txg'/>
+ <parameter type-id='9c313c2d' name='max_txg'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-14' name='props'/>
- <parameter type-id='type-id-103' name='listp'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_iter_filesystems' mangled-name='zfs_iter_filesystems' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_iter_filesystems'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <class-decl name='zprop_get_cbdata' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-181'>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_mount.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='f1bd64e2' size-in-bits='384' id='b2c36c9f'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <typedef-decl name='proto_table_t' type-id='9faf92fc' id='f1bd64e2'/>
+ <class-decl name='__anonymous_struct__' size-in-bits='192' is-struct='yes' is-anonymous='yes' naming-typedef-id='f1bd64e2' visibility='default' id='9faf92fc'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='cb_sources' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='cb_columns' type-id='type-id-182' visibility='default'/>
+ <var-decl name='p_prop' type-id='58603c44' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='cb_colwidths' type-id='type-id-183' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='p_name' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='cb_scripted' type-id='type-id-9' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='p_share_err' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='416'>
- <var-decl name='cb_literal' type-id='type-id-9' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='p_unshare_err' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='cb_first' type-id='type-id-9' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='get_all_cb_t' type-id='803dac95' id='9b293607'/>
+ <class-decl name='get_all_cb' size-in-bits='192' is-struct='yes' visibility='default' id='803dac95'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='cb_handles' type-id='4507922a' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='cb_proplist' type-id='type-id-102' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='cb_alloc' type-id='b59d7dce' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='cb_type' type-id='type-id-66' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='cb_used' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-184'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='GET_COL_NONE' value='0'/>
- <enumerator name='GET_COL_NAME' value='1'/>
- <enumerator name='GET_COL_PROPERTY' value='2'/>
- <enumerator name='GET_COL_VALUE' value='3'/>
- <enumerator name='GET_COL_RECVD' value='4'/>
- <enumerator name='GET_COL_SOURCE' value='5'/>
- </enum-decl>
- <typedef-decl name='zfs_get_column_t' type-id='type-id-184' id='type-id-185'/>
-
- <array-type-def dimensions='1' type-id='type-id-185' size-in-bits='160' alignment-in-bits='32' id='type-id-182'>
- <subrange length='5' type-id='type-id-24' id='type-id-186'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='192' id='type-id-183'>
- <subrange length='6' type-id='type-id-24' id='type-id-187'/>
-
- </array-type-def>
- <typedef-decl name='zprop_get_cbdata_t' type-id='type-id-181' id='type-id-188'/>
- <pointer-type-def type-id='type-id-188' size-in-bits='64' id='type-id-189'/>
- <function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-189' name='cbp'/>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-84' name='value'/>
- <parameter type-id='type-id-112' name='sourcetype'/>
- <parameter type-id='type-id-84' name='source'/>
- <parameter type-id='type-id-84' name='recvd_value'/>
- <return type-id='type-id-1'/>
+ <pointer-type-def type-id='9b293607' size-in-bits='64' id='77bf1784'/>
+ <pointer-type-def type-id='9200a744' size-in-bits='64' id='4507922a'/>
+ <var-decl name='proto_table' type-id='b2c36c9f' visibility='default'/>
+ <function-decl name='zpool_disable_datasets' mangled-name='zpool_disable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_path_to_zhandle' mangled-name='zfs_path_to_zhandle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_path_to_zhandle'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-66' name='argtype'/>
- <return type-id='type-id-76'/>
+ <function-decl name='zpool_enable_datasets' mangled-name='zpool_enable_datasets' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_enable_datasets'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='mntopts'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_handle'>
- <parameter type-id='type-id-107' name='zhp'/>
- <return type-id='type-id-4'/>
+ <function-decl name='zfs_foreach_mountpoint' mangled-name='zfs_foreach_mountpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_foreach_mountpoint'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='4507922a' name='handles'/>
+ <parameter type-id='b59d7dce' name='num_handles'/>
+ <parameter type-id='d8e49ab9' name='func'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <parameter type-id='c19b74c3' name='parallel'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_handle'>
- <parameter type-id='type-id-76' name='zhp'/>
- <return type-id='type-id-16'/>
+ <function-decl name='libzfs_add_handle' mangled-name='libzfs_add_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_add_handle'>
+ <parameter type-id='77bf1784' name='cbp'/>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_handle'>
- <parameter type-id='type-id-4' name='zhp'/>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_unshareall_bytype' mangled-name='zfs_unshareall_bytype' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bytype'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <parameter type-id='80f4b756' name='proto'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_fini'>
- <parameter type-id='type-id-16' name='hdl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_unshareall_bypath' mangled-name='zfs_unshareall_bypath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_bypath'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_init'>
- <return type-id='type-id-16'/>
+ <function-decl name='zfs_unshareall' mangled-name='zfs_unshareall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_envvar_is_set'>
- <parameter type-id='type-id-14' name='envvar'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_unshareall_smb' mangled-name='zfs_unshareall_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_smb'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_free_str_array' mangled-name='libzfs_free_str_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_free_str_array'>
- <parameter type-id='type-id-117' name='strs'/>
- <parameter type-id='type-id-2' name='count'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_unshareall_nfs' mangled-name='zfs_unshareall_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshareall_nfs'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <pointer-type-def type-id='type-id-117' size-in-bits='64' id='type-id-190'/>
- <function-decl name='libzfs_run_process_get_stdout_nopath' mangled-name='libzfs_run_process_get_stdout_nopath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout_nopath'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-117' name='argv'/>
- <parameter type-id='type-id-117' name='env'/>
- <parameter type-id='type-id-190' name='lines'/>
- <parameter type-id='type-id-114' name='lines_cnt'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_unshare_smb' mangled-name='zfs_unshare_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_smb'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_run_process_get_stdout' mangled-name='libzfs_run_process_get_stdout' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-117' name='argv'/>
- <parameter type-id='type-id-117' name='env'/>
- <parameter type-id='type-id-190' name='lines'/>
- <parameter type-id='type-id-114' name='lines_cnt'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_unshare_nfs' mangled-name='zfs_unshare_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare_nfs'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-117' name='argv'/>
- <parameter type-id='type-id-2' name='flags'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_share_smb' mangled-name='zfs_share_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_smb'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_print_on_error' mangled-name='libzfs_print_on_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_print_on_error'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-9' name='enable'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_share_nfs' mangled-name='zfs_share_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share_nfs'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-2' name='error'/>
- <parameter type-id='type-id-84' name='msg'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_commit_shares' mangled-name='zfs_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_shares'>
+ <parameter type-id='80f4b756' name='proto'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='libzfs_error_action' mangled-name='libzfs_error_action' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_action'>
- <parameter type-id='type-id-16' name='hdl'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_commit_all_shares' mangled-name='zfs_commit_all_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_all_shares'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='libzfs_errno' mangled-name='libzfs_errno' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_errno'>
- <parameter type-id='type-id-16' name='hdl'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_commit_smb_shares' mangled-name='zfs_commit_smb_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_smb_shares'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_description'>
- <parameter type-id='type-id-16' name='hdl'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_commit_nfs_shares' mangled-name='zfs_commit_nfs_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_commit_nfs_shares'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicestrtonum'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-84' name='value'/>
- <parameter type-id='type-id-108' name='num'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zfs_is_shared_smb' mangled-name='zfs_is_shared_smb' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_smb'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='9b23c9ad' name='where'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='color_start' mangled-name='color_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_start'>
- <parameter type-id='type-id-14' name='color'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_is_shared_nfs' mangled-name='zfs_is_shared_nfs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared_nfs'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='9b23c9ad' name='where'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='color_end' mangled-name='color_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_end'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_unshare' mangled-name='zfs_unshare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unshare'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_share' mangled-name='zfs_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_share'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_is_shared' mangled-name='zfs_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_shared'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='zfs_unmountall' mangled-name='zfs_unmountall' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmountall'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_unmount' mangled-name='zfs_unmount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_unmount'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_mount_at' mangled-name='zfs_mount_at' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_at'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='options'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <parameter type-id='80f4b756' name='mountpoint'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_mount' mangled-name='zfs_mount' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='options'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_is_mounted' mangled-name='zfs_is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_is_mounted'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='9b23c9ad' name='where'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ <function-decl name='is_mounted' mangled-name='is_mounted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mounted'>
+ <parameter type-id='b0382bb3' name='zfs_hdl'/>
+ <parameter type-id='80f4b756' name='special'/>
+ <parameter type-id='9b23c9ad' name='where'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='libzfs_pool.c' language='LANG_C99'>
+ <typedef-decl name='zpool_wait_activity_t' type-id='08f5ca1d' id='73446457'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca1d'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
+ <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
+ <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
+ <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
+ <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
+ <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
+ <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
+ <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
+ <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
+ </enum-decl>
+ <typedef-decl name='splitflags_t' type-id='dc01bf52' id='325c1e34'/>
+ <class-decl name='splitflags' size-in-bits='64' is-struct='yes' visibility='default' id='dc01bf52'>
+ <data-member access='public' layout-offset-in-bits='31'>
+ <var-decl name='dryrun' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='30'>
+ <var-decl name='import' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='name_flags' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='vdev_aux_t' type-id='7f5bcca4' id='9d774e0b'/>
+ <enum-decl name='vdev_aux' id='7f5bcca4'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='VDEV_AUX_NONE' value='0'/>
+ <enumerator name='VDEV_AUX_OPEN_FAILED' value='1'/>
+ <enumerator name='VDEV_AUX_CORRUPT_DATA' value='2'/>
+ <enumerator name='VDEV_AUX_NO_REPLICAS' value='3'/>
+ <enumerator name='VDEV_AUX_BAD_GUID_SUM' value='4'/>
+ <enumerator name='VDEV_AUX_TOO_SMALL' value='5'/>
+ <enumerator name='VDEV_AUX_BAD_LABEL' value='6'/>
+ <enumerator name='VDEV_AUX_VERSION_NEWER' value='7'/>
+ <enumerator name='VDEV_AUX_VERSION_OLDER' value='8'/>
+ <enumerator name='VDEV_AUX_UNSUP_FEAT' value='9'/>
+ <enumerator name='VDEV_AUX_SPARED' value='10'/>
+ <enumerator name='VDEV_AUX_ERR_EXCEEDED' value='11'/>
+ <enumerator name='VDEV_AUX_IO_FAILURE' value='12'/>
+ <enumerator name='VDEV_AUX_BAD_LOG' value='13'/>
+ <enumerator name='VDEV_AUX_EXTERNAL' value='14'/>
+ <enumerator name='VDEV_AUX_SPLIT_POOL' value='15'/>
+ <enumerator name='VDEV_AUX_BAD_ASHIFT' value='16'/>
+ <enumerator name='VDEV_AUX_EXTERNAL_PERSIST' value='17'/>
+ <enumerator name='VDEV_AUX_ACTIVE' value='18'/>
+ <enumerator name='VDEV_AUX_CHILDREN_OFFLINE' value='19'/>
+ <enumerator name='VDEV_AUX_ASHIFT_TOO_BIG' value='20'/>
+ </enum-decl>
+ <typedef-decl name='vdev_state_t' type-id='21566197' id='35acf840'/>
+ <enum-decl name='vdev_state' id='21566197'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='VDEV_STATE_UNKNOWN' value='0'/>
+ <enumerator name='VDEV_STATE_CLOSED' value='1'/>
+ <enumerator name='VDEV_STATE_OFFLINE' value='2'/>
+ <enumerator name='VDEV_STATE_REMOVED' value='3'/>
+ <enumerator name='VDEV_STATE_CANT_OPEN' value='4'/>
+ <enumerator name='VDEV_STATE_FAULTED' value='5'/>
+ <enumerator name='VDEV_STATE_DEGRADED' value='6'/>
+ <enumerator name='VDEV_STATE_HEALTHY' value='7'/>
+ </enum-decl>
+ <typedef-decl name='pool_scan_func_t' type-id='1b092565' id='7313fbe2'/>
+ <enum-decl name='pool_scan_func' id='1b092565'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='POOL_SCAN_NONE' value='0'/>
+ <enumerator name='POOL_SCAN_SCRUB' value='1'/>
+ <enumerator name='POOL_SCAN_RESILVER' value='2'/>
+ <enumerator name='POOL_SCAN_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='pool_scrub_cmd_t' type-id='a1474cbd' id='b51cf3c2'/>
+ <enum-decl name='pool_scrub_cmd' id='a1474cbd'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='POOL_SCRUB_NORMAL' value='0'/>
+ <enumerator name='POOL_SCRUB_PAUSE' value='1'/>
+ <enumerator name='POOL_SCRUB_FLAGS_END' value='2'/>
+ </enum-decl>
+ <typedef-decl name='pool_trim_func_t' type-id='54ed608a' id='b1146b8d'/>
+ <enum-decl name='pool_trim_func' id='54ed608a'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='POOL_TRIM_START' value='0'/>
+ <enumerator name='POOL_TRIM_CANCEL' value='1'/>
+ <enumerator name='POOL_TRIM_SUSPEND' value='2'/>
+ <enumerator name='POOL_TRIM_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='trimflags_t' type-id='8ef58008' id='a093cbb8'/>
+ <class-decl name='trimflags' size-in-bits='192' is-struct='yes' visibility='default' id='8ef58008'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='fullpool' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='secure' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='wait' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='rate' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='pool_initialize_func_t' type-id='5c246ad4' id='7063e1ab'/>
+ <enum-decl name='pool_initialize_func' id='5c246ad4'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='POOL_INITIALIZE_START' value='0'/>
+ <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
+ <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
+ <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='zpool_prop_t' type-id='40ed39d5' id='5d0c23fb'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d5'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPOOL_PROP_INVAL' value='-1'/>
+ <enumerator name='ZPOOL_PROP_NAME' value='0'/>
+ <enumerator name='ZPOOL_PROP_SIZE' value='1'/>
+ <enumerator name='ZPOOL_PROP_CAPACITY' value='2'/>
+ <enumerator name='ZPOOL_PROP_ALTROOT' value='3'/>
+ <enumerator name='ZPOOL_PROP_HEALTH' value='4'/>
+ <enumerator name='ZPOOL_PROP_GUID' value='5'/>
+ <enumerator name='ZPOOL_PROP_VERSION' value='6'/>
+ <enumerator name='ZPOOL_PROP_BOOTFS' value='7'/>
+ <enumerator name='ZPOOL_PROP_DELEGATION' value='8'/>
+ <enumerator name='ZPOOL_PROP_AUTOREPLACE' value='9'/>
+ <enumerator name='ZPOOL_PROP_CACHEFILE' value='10'/>
+ <enumerator name='ZPOOL_PROP_FAILUREMODE' value='11'/>
+ <enumerator name='ZPOOL_PROP_LISTSNAPS' value='12'/>
+ <enumerator name='ZPOOL_PROP_AUTOEXPAND' value='13'/>
+ <enumerator name='ZPOOL_PROP_DEDUPDITTO' value='14'/>
+ <enumerator name='ZPOOL_PROP_DEDUPRATIO' value='15'/>
+ <enumerator name='ZPOOL_PROP_FREE' value='16'/>
+ <enumerator name='ZPOOL_PROP_ALLOCATED' value='17'/>
+ <enumerator name='ZPOOL_PROP_READONLY' value='18'/>
+ <enumerator name='ZPOOL_PROP_ASHIFT' value='19'/>
+ <enumerator name='ZPOOL_PROP_COMMENT' value='20'/>
+ <enumerator name='ZPOOL_PROP_EXPANDSZ' value='21'/>
+ <enumerator name='ZPOOL_PROP_FREEING' value='22'/>
+ <enumerator name='ZPOOL_PROP_FRAGMENTATION' value='23'/>
+ <enumerator name='ZPOOL_PROP_LEAKED' value='24'/>
+ <enumerator name='ZPOOL_PROP_MAXBLOCKSIZE' value='25'/>
+ <enumerator name='ZPOOL_PROP_TNAME' value='26'/>
+ <enumerator name='ZPOOL_PROP_MAXDNODESIZE' value='27'/>
+ <enumerator name='ZPOOL_PROP_MULTIHOST' value='28'/>
+ <enumerator name='ZPOOL_PROP_CHECKPOINT' value='29'/>
+ <enumerator name='ZPOOL_PROP_LOAD_GUID' value='30'/>
+ <enumerator name='ZPOOL_PROP_AUTOTRIM' value='31'/>
+ <enumerator name='ZPOOL_PROP_COMPATIBILITY' value='32'/>
+ <enumerator name='ZPOOL_NUM_PROPS' value='33'/>
+ </enum-decl>
+ <typedef-decl name='zpool_compat_status_t' type-id='3fed3840' id='901b78d1'/>
+ <enum-decl name='__anonymous_enum__2' is-anonymous='yes' id='3fed3840'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_OK' value='0'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_WARNTOKEN' value='1'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_BADTOKEN' value='2'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_BADFILE' value='3'/>
+ <enumerator name='ZPOOL_COMPATIBILITY_NOFILES' value='4'/>
+ </enum-decl>
+ <qualified-type-def type-id='8e8d4be3' const='yes' id='693c3853'/>
+ <pointer-type-def type-id='693c3853' size-in-bits='64' id='22cce67b'/>
+ <pointer-type-def type-id='a093cbb8' size-in-bits='64' id='b13f38c3'/>
+ <pointer-type-def type-id='35acf840' size-in-bits='64' id='17f3480d'/>
+ <function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_bootenv'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='857bb57e' name='nvlp'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_bootenv'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='22cce67b' name='envmap'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_wait_status' mangled-name='zpool_wait_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait_status'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='73446457' name='activity'/>
+ <parameter type-id='37e3bd22' name='missing'/>
+ <parameter type-id='37e3bd22' name='waited'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_wait' mangled-name='zpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_wait'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='73446457' name='activity'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_obj_to_path_ds' mangled-name='zpool_obj_to_path_ds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path_ds'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9c313c2d' name='dsobj'/>
+ <parameter type-id='9c313c2d' name='obj'/>
+ <parameter type-id='26a90f95' name='pathname'/>
+ <parameter type-id='b59d7dce' name='len'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_obj_to_path' mangled-name='zpool_obj_to_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_obj_to_path'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9c313c2d' name='dsobj'/>
+ <parameter type-id='9c313c2d' name='obj'/>
+ <parameter type-id='26a90f95' name='pathname'/>
+ <parameter type-id='b59d7dce' name='len'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_events_seek' mangled-name='zpool_events_seek' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_seek'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='9c313c2d' name='eid'/>
+ <parameter type-id='95e97e5e' name='zevent_fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_events_clear' mangled-name='zpool_events_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_clear'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='7292109c' name='count'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_events_next' mangled-name='zpool_events_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_events_next'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='857bb57e' name='nvp'/>
+ <parameter type-id='7292109c' name='dropped'/>
+ <parameter type-id='f0981eeb' name='flags'/>
+ <parameter type-id='95e97e5e' name='zevent_fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_history' mangled-name='zpool_get_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_history'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='857bb57e' name='nvhisp'/>
+ <parameter type-id='5d6479ae' name='off'/>
+ <parameter type-id='37e3bd22' name='eof'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_log_history' mangled-name='zpool_log_history' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_log_history'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='message'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='__ctype_toupper_loc' mangled-name='__ctype_toupper_loc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_save_arguments' mangled-name='zfs_save_arguments' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_save_arguments'>
+ <parameter type-id='95e97e5e' name='argc'/>
+ <parameter type-id='9b23c9ad' name='argv'/>
+ <parameter type-id='26a90f95' name='string'/>
+ <parameter type-id='95e97e5e' name='len'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_upgrade' mangled-name='zpool_upgrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_upgrade'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9c313c2d' name='new_version'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_free_handles' mangled-name='zpool_free_handles' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_errlog' mangled-name='zpool_get_errlog' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_errlog'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='857bb57e' name='nverrlistp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='namespace_clear' mangled-name='namespace_clear' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_name' mangled-name='zpool_vdev_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_name'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='5ce45b60' name='nv'/>
+ <parameter type-id='95e97e5e' name='name_flags'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='libzfs_mnttab_fini' mangled-name='libzfs_mnttab_fini' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_sync_one' mangled-name='zpool_sync_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_sync_one'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_core_fini' mangled-name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_reopen_one' mangled-name='zpool_reopen_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reopen_one'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='regfree' mangled-name='regfree' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_reguid' mangled-name='zpool_reguid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_reguid'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fletcher_4_fini' mangled-name='fletcher_4_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_fini'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_clear' mangled-name='zpool_vdev_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_clear'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9c313c2d' name='guid'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_clear' mangled-name='zpool_clear' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_clear'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='5ce45b60' name='rewindnvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_indirect_size' mangled-name='zpool_vdev_indirect_size' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_indirect_size'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='5d6479ae' name='sizep'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_load_module' mangled-name='libzfs_load_module' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_remove_cancel' mangled-name='zpool_vdev_remove_cancel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove_cancel'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='regcomp' mangled-name='regcomp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_remove' mangled-name='zpool_vdev_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_remove'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_core_init' mangled-name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_split' mangled-name='zpool_vdev_split' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_split'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='26a90f95' name='newname'/>
+ <parameter type-id='857bb57e' name='newroot'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='325c1e34' name='flags'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_prop_init' mangled-name='zfs_prop_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_detach' mangled-name='zpool_vdev_detach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_detach'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_prop_init' mangled-name='zpool_prop_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_attach' mangled-name='zpool_vdev_attach' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_attach'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='old_disk'/>
+ <parameter type-id='80f4b756' name='new_disk'/>
+ <parameter type-id='5ce45b60' name='nvroot'/>
+ <parameter type-id='95e97e5e' name='replacing'/>
+ <parameter type-id='c19b74c3' name='rebuild'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zpool_feature_init' mangled-name='zpool_feature_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_degrade' mangled-name='zpool_vdev_degrade' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_degrade'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9c313c2d' name='guid'/>
+ <parameter type-id='9d774e0b' name='aux'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_mnttab_init' mangled-name='libzfs_mnttab_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_fault' mangled-name='zpool_vdev_fault' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_fault'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9c313c2d' name='guid'/>
+ <parameter type-id='9d774e0b' name='aux'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fletcher_4_init' mangled-name='fletcher_4_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_offline' mangled-name='zpool_vdev_offline' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_offline'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='c19b74c3' name='istmp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='strnlen' mangled-name='strnlen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_online' mangled-name='zpool_vdev_online' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_online'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <parameter type-id='17f3480d' name='newstate'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='realloc' mangled-name='realloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_vdev_path_to_guid' mangled-name='zpool_vdev_path_to_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_vdev_path_to_guid'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='waitpid' mangled-name='waitpid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_physpath' mangled-name='zpool_get_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_physpath'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='26a90f95' name='physpath'/>
+ <parameter type-id='b59d7dce' name='phypath_size'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fork' mangled-name='fork' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_find_vdev' mangled-name='zpool_find_vdev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='37e3bd22' name='avail_spare'/>
+ <parameter type-id='37e3bd22' name='l2cache'/>
+ <parameter type-id='37e3bd22' name='log'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='dup2' mangled-name='dup2' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_find_vdev_by_physpath' mangled-name='zpool_find_vdev_by_physpath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_vdev_by_physpath'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='ppath'/>
+ <parameter type-id='37e3bd22' name='avail_spare'/>
+ <parameter type-id='37e3bd22' name='l2cache'/>
+ <parameter type-id='37e3bd22' name='log'/>
+ <return type-id='5ce45b60'/>
</function-decl>
- <function-decl name='execve' mangled-name='execve' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_scan' mangled-name='zpool_scan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_scan'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='7313fbe2' name='func'/>
+ <parameter type-id='b51cf3c2' name='cmd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='_exit' mangled-name='_exit' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_trim' mangled-name='zpool_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_trim'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='b1146b8d' name='cmd_type'/>
+ <parameter type-id='5ce45b60' name='vds'/>
+ <parameter type-id='b13f38c3' name='trim_flags'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='execvpe' mangled-name='execvpe' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_initialize_wait' mangled-name='zpool_initialize_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize_wait'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='7063e1ab' name='cmd_type'/>
+ <parameter type-id='5ce45b60' name='vds'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='execv' mangled-name='execv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_initialize' mangled-name='zpool_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_initialize'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='7063e1ab' name='cmd_type'/>
+ <parameter type-id='5ce45b60' name='vds'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='execvp' mangled-name='execvp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_import_props' mangled-name='zpool_import_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_props'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='5ce45b60' name='config'/>
+ <parameter type-id='80f4b756' name='newname'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='__vasprintf_chk' mangled-name='__vasprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_print_unsup_feat' mangled-name='zpool_print_unsup_feat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_print_unsup_feat'>
+ <parameter type-id='5ce45b60' name='config'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='__builtin___vsnprintf_chk' mangled-name='__vsnprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_import' mangled-name='zpool_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='5ce45b60' name='config'/>
+ <parameter type-id='80f4b756' name='newname'/>
+ <parameter type-id='26a90f95' name='altroot'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='exit' mangled-name='exit' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_explain_recover' mangled-name='zpool_explain_recover' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_explain_recover'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='95e97e5e' name='reason'/>
+ <parameter type-id='5ce45b60' name='config'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='strtod' mangled-name='strtod' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_export_force' mangled-name='zpool_export_force' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export_force'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='log_str'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='pow' mangled-name='pow' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_export' mangled-name='zpool_export' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_export'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <parameter type-id='80f4b756' name='log_str'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-178'>
- <parameter type-id='type-id-2'/>
- <parameter type-id='type-id-13'/>
- <return type-id='type-id-2'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_mount_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_mount_delegation_check' mangled-name='zfs_mount_delegation_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_delegation_check'>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_add' mangled-name='zpool_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_add'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='5ce45b60' name='nvroot'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_adjust_mount_options' mangled-name='zfs_adjust_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_adjust_mount_options'>
- <parameter type-id='type-id-76' name='zhp'/>
- <parameter type-id='type-id-84' name='mntpoint'/>
- <parameter type-id='type-id-14' name='mntopts'/>
- <parameter type-id='type-id-14' name='mtabopt'/>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_discard_checkpoint' mangled-name='zpool_discard_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_discard_checkpoint'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-191'/>
- <function-decl name='zfs_parse_mount_options' mangled-name='zfs_parse_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_mount_options'>
- <parameter type-id='type-id-14' name='mntopts'/>
- <parameter type-id='type-id-191' name='mntflags'/>
- <parameter type-id='type-id-191' name='zfsflags'/>
- <parameter type-id='type-id-2' name='sloppy'/>
- <parameter type-id='type-id-14' name='badopt'/>
- <parameter type-id='type-id-14' name='mtabopt'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_checkpoint' mangled-name='zpool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_checkpoint'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='geteuid' mangled-name='geteuid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_destroy' mangled-name='zpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_destroy'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='log_str'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='umount2' mangled-name='umount2' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_create' mangled-name='zpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_create'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='5ce45b60' name='nvroot'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='5ce45b60' name='fsprops'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_is_draid_spare' mangled-name='zpool_is_draid_spare' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_is_draid_spare'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_state' mangled-name='zpool_get_state' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='mount' mangled-name='mount' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_name' mangled-name='zpool_get_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_name'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='80f4b756'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_pool_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-4' name='zhp'/>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_close'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='rand' mangled-name='rand' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='pool'/>
+ <return type-id='4c81de99'/>
</function-decl>
- <function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_open_canfail' mangled-name='zpool_open_canfail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_open_canfail'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='pool'/>
+ <return type-id='4c81de99'/>
</function-decl>
- <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_prop_get_feature' mangled-name='zpool_prop_get_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_feature'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='len'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_expand_proplist' mangled-name='zpool_expand_proplist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_expand_proplist'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='e4378506' name='plp'/>
+ <parameter type-id='c19b74c3' name='literal'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_set_prop' mangled-name='zpool_set_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_set_prop'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='80f4b756' name='propval'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fsync' mangled-name='fsync' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_prop' mangled-name='zpool_get_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='len'/>
+ <parameter type-id='debc6aa3' name='srctype'/>
+ <parameter type-id='c19b74c3' name='literal'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_pool_state_to_name' mangled-name='zpool_pool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_pool_state_to_name'>
+ <parameter type-id='084a08a3' name='state'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_state_to_name' mangled-name='zpool_state_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_state_to_name'>
+ <parameter type-id='35acf840' name='state'/>
+ <parameter type-id='9d774e0b' name='aux'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_prop_int' mangled-name='zpool_get_prop_int' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_prop_int'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='5d0c23fb' name='prop'/>
+ <parameter type-id='debc6aa3' name='src'/>
+ <return type-id='9c313c2d'/>
</function-decl>
- <function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_props_refresh' mangled-name='zpool_props_refresh' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_props_refresh'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_sendrecv_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='fscanf' mangled-name='fscanf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_get_state_str' mangled-name='zpool_get_state_str' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_state_str'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='80f4b756'/>
</function-decl>
- <function-decl name='fcntl' mangled-name='fcntl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zpool_load_compat' mangled-name='zpool_load_compat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_load_compat'>
+ <parameter type-id='80f4b756' name='compat'/>
+ <parameter type-id='37e3bd22' name='features'/>
+ <parameter type-id='26a90f95' name='report'/>
+ <parameter type-id='b59d7dce' name='rlen'/>
+ <return type-id='901b78d1'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_util_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_kernel'>
- <parameter type-id='type-id-14' name='version'/>
- <parameter type-id='type-id-2' name='len'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='libzfs_error_init' mangled-name='libzfs_error_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_init'>
- <parameter type-id='type-id-2' name='error'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='type-id-192'>
+ <abi-instr version='1.0' address-size='64' path='libzfs_sendrecv.c' language='LANG_C99'>
+ <typedef-decl name='recvflags_t' type-id='34a384dc' id='9e59d1d4'/>
+ <class-decl name='recvflags' size-in-bits='416' is-struct='yes' visibility='default' id='34a384dc'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_name' type-id='type-id-193' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32768'>
- <var-decl name='zc_nvlist_src' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32832'>
- <var-decl name='zc_nvlist_src_size' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32896'>
- <var-decl name='zc_nvlist_dst' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32960'>
- <var-decl name='zc_nvlist_dst_size' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33024'>
- <var-decl name='zc_nvlist_dst_filled' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33056'>
- <var-decl name='zc_pad2' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33088'>
- <var-decl name='zc_history' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='33152'>
- <var-decl name='zc_value' type-id='type-id-194' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='98688'>
- <var-decl name='zc_string' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100736'>
- <var-decl name='zc_guid' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100800'>
- <var-decl name='zc_nvlist_conf' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100864'>
- <var-decl name='zc_nvlist_conf_size' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100928'>
- <var-decl name='zc_cookie' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='100992'>
- <var-decl name='zc_objset_type' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101056'>
- <var-decl name='zc_perm_action' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101120'>
- <var-decl name='zc_history_len' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101184'>
- <var-decl name='zc_history_offset' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101248'>
- <var-decl name='zc_obj' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101312'>
- <var-decl name='zc_iflags' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101376'>
- <var-decl name='zc_share' type-id='type-id-195' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='101632'>
- <var-decl name='zc_objset_stats' type-id='type-id-67' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='103936'>
- <var-decl name='zc_begin_record' type-id='type-id-196' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='106368'>
- <var-decl name='zc_inject_record' type-id='type-id-197' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109184'>
- <var-decl name='zc_defer_destroy' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109216'>
- <var-decl name='zc_flags' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109248'>
- <var-decl name='zc_action_handle' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109312'>
- <var-decl name='zc_cleanup_fd' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109344'>
- <var-decl name='zc_simple' type-id='type-id-72' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109352'>
- <var-decl name='zc_pad' type-id='type-id-198' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109376'>
- <var-decl name='zc_sendobj' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109440'>
- <var-decl name='zc_fromobj' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109504'>
- <var-decl name='zc_createtxg' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109568'>
- <var-decl name='zc_stat' type-id='type-id-199' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109888'>
- <var-decl name='zc_zoneid' type-id='type-id-7' visibility='default'/>
+ <var-decl name='verbose' type-id='c19b74c3' visibility='default'/>
</data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='32768' id='type-id-193'>
- <subrange length='4096' type-id='type-id-24' id='type-id-200'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='65536' id='type-id-194'>
- <subrange length='8192' type-id='type-id-24' id='type-id-201'/>
-
- </array-type-def>
- <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-202'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='z_exportdata' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='isprefix' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='z_sharedata' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='z_sharetype' type-id='type-id-7' visibility='default'/>
+ <var-decl name='istail' type-id='c19b74c3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='z_sharemax' type-id='type-id-7' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zfs_share_t' type-id='type-id-202' id='type-id-195'/>
- <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-196'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_magic' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_versioninfo' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_creation_time' type-id='type-id-7' visibility='default'/>
+ <var-decl name='force' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='canmountoff' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_type' type-id='type-id-71' visibility='default'/>
+ <var-decl name='resumable' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='drr_flags' type-id='type-id-28' visibility='default'/>
+ <var-decl name='byteswap' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-7' visibility='default'/>
+ <var-decl name='nomount' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_fromguid' type-id='type-id-7' visibility='default'/>
+ <var-decl name='skipholds' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='domount' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_toname' type-id='type-id-17' visibility='default'/>
+ <var-decl name='forceunmount' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='type-id-203'>
+ <typedef-decl name='sendflags_t' type-id='f6aa15be' id='945467e6'/>
+ <class-decl name='sendflags' size-in-bits='544' is-struct='yes' visibility='default' id='f6aa15be'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zi_objset' type-id='type-id-7' visibility='default'/>
+ <var-decl name='verbosity' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='replicate' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zi_object' type-id='type-id-7' visibility='default'/>
+ <var-decl name='skipmissing' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='doall' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zi_start' type-id='type-id-7' visibility='default'/>
+ <var-decl name='fromorigin' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='pad' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zi_end' type-id='type-id-7' visibility='default'/>
+ <var-decl name='props' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='dryrun' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='zi_guid' type-id='type-id-7' visibility='default'/>
+ <var-decl name='parsable' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='progress' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='zi_level' type-id='type-id-28' visibility='default'/>
+ <var-decl name='largeblock' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='zi_error' type-id='type-id-28' visibility='default'/>
+ <var-decl name='embed_data' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='zi_type' type-id='type-id-7' visibility='default'/>
+ <var-decl name='compress' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='416'>
+ <var-decl name='raw' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='zi_freq' type-id='type-id-28' visibility='default'/>
+ <var-decl name='backup' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='zi_failfast' type-id='type-id-28' visibility='default'/>
+ <var-decl name='holds' type-id='c19b74c3' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='zi_func' type-id='type-id-17' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2560'>
- <var-decl name='zi_iotype' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2592'>
- <var-decl name='zi_duration' type-id='type-id-27' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2624'>
- <var-decl name='zi_timer' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2688'>
- <var-decl name='zi_nlanes' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2752'>
- <var-decl name='zi_cmd' type-id='type-id-28' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2784'>
- <var-decl name='zi_dvas' type-id='type-id-28' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zinject_record_t' type-id='type-id-203' id='type-id-197'/>
-
- <array-type-def dimensions='1' type-id='type-id-72' size-in-bits='24' id='type-id-198'>
- <subrange length='3' type-id='type-id-24' id='type-id-204'/>
-
- </array-type-def>
- <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-205'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zs_gen' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zs_mode' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zs_links' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zs_ctime' type-id='type-id-206' visibility='default'/>
+ <var-decl name='saved' type-id='c19b74c3' visibility='default'/>
</data-member>
</class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-7' size-in-bits='128' id='type-id-206'>
- <subrange length='2' type-id='type-id-24' id='type-id-59'/>
-
- </array-type-def>
- <typedef-decl name='zfs_stat_t' type-id='type-id-205' id='type-id-199'/>
- <typedef-decl name='zfs_cmd_t' type-id='type-id-192' id='type-id-207'/>
- <pointer-type-def type-id='type-id-207' size-in-bits='64' id='type-id-208'/>
- <function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl'>
- <parameter type-id='type-id-16' name='hdl'/>
- <parameter type-id='type-id-2' name='request'/>
- <parameter type-id='type-id-208' name='zc'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='clock_gettime' mangled-name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <typedef-decl name='snapfilter_cb_t' type-id='d2a5e211' id='3d3ffb69'/>
+ <pointer-type-def type-id='f20fbd51' size-in-bits='64' id='a3681dea'/>
+ <pointer-type-def type-id='9e59d1d4' size-in-bits='64' id='4ea84b4f'/>
+ <pointer-type-def type-id='945467e6' size-in-bits='64' id='8def7735'/>
+ <pointer-type-def type-id='3d3ffb69' size-in-bits='64' id='72a26210'/>
+ <function-decl name='zfs_receive' mangled-name='zfs_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_receive'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='tosnap'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='4ea84b4f' name='flags'/>
+ <parameter type-id='95e97e5e' name='infd'/>
+ <parameter type-id='a3681dea' name='stream_avl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='sched_yield' mangled-name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_send_one' mangled-name='zfs_send_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_one'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='from'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='8def7735' name='flags'/>
+ <parameter type-id='80f4b756' name='redactbook'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='usleep' mangled-name='usleep' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_send' mangled-name='zfs_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='fromsnap'/>
+ <parameter type-id='80f4b756' name='tosnap'/>
+ <parameter type-id='8def7735' name='flags'/>
+ <parameter type-id='95e97e5e' name='outfd'/>
+ <parameter type-id='72a26210' name='filter_func'/>
+ <parameter type-id='eaa32e2f' name='cb_arg'/>
+ <parameter type-id='857bb57e' name='debugnvp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='access' mangled-name='access' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_send_saved' mangled-name='zfs_send_saved' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_saved'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='8def7735' name='flags'/>
+ <parameter type-id='95e97e5e' name='outfd'/>
+ <parameter type-id='80f4b756' name='resume_token'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/icp/algs/sha2/sha2.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='htonl' mangled-name='htonl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_send_resume' mangled-name='zfs_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='8def7735' name='flags'/>
+ <parameter type-id='95e97e5e' name='outfd'/>
+ <parameter type-id='80f4b756' name='resume_token'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/cityhash.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='cityhash4' mangled-name='cityhash4' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='cityhash4'>
- <parameter type-id='type-id-7' name='w1'/>
- <parameter type-id='type-id-7' name='w2'/>
- <parameter type-id='type-id-7' name='w3'/>
- <parameter type-id='type-id-7' name='w4'/>
- <return type-id='type-id-7'/>
+ <function-decl name='zfs_send_resume_token_to_nvlist' mangled-name='zfs_send_resume_token_to_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_resume_token_to_nvlist'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='token'/>
+ <return type-id='5ce45b60'/>
</function-decl>
+ <function-decl name='zfs_send_progress' mangled-name='zfs_send_progress' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_send_progress'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='5d6479ae' name='bytes_written'/>
+ <parameter type-id='5d6479ae' name='blocks_visited'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='d2a5e211'>
+ <parameter type-id='9200a744'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='c19b74c3'/>
+ </function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfeature_common.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='zfeature_checks_disable' type-id='type-id-9' mangled-name='zfeature_checks_disable' visibility='default' elf-symbol-id='zfeature_checks_disable'/>
- <class-decl name='zfeature_info' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-209'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='fi_feature' type-id='type-id-210' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='fi_uname' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='fi_guid' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='fi_desc' type-id='type-id-84' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fi_flags' type-id='type-id-211' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='fi_zfs_mod_supported' type-id='type-id-9' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='fi_type' type-id='type-id-212' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='fi_depends' type-id='type-id-213' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='spa_feature' id='type-id-214'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='SPA_FEATURE_NONE' value='-1'/>
- <enumerator name='SPA_FEATURE_ASYNC_DESTROY' value='0'/>
- <enumerator name='SPA_FEATURE_EMPTY_BPOBJ' value='1'/>
- <enumerator name='SPA_FEATURE_LZ4_COMPRESS' value='2'/>
- <enumerator name='SPA_FEATURE_MULTI_VDEV_CRASH_DUMP' value='3'/>
- <enumerator name='SPA_FEATURE_SPACEMAP_HISTOGRAM' value='4'/>
- <enumerator name='SPA_FEATURE_ENABLED_TXG' value='5'/>
- <enumerator name='SPA_FEATURE_HOLE_BIRTH' value='6'/>
- <enumerator name='SPA_FEATURE_EXTENSIBLE_DATASET' value='7'/>
- <enumerator name='SPA_FEATURE_EMBEDDED_DATA' value='8'/>
- <enumerator name='SPA_FEATURE_BOOKMARKS' value='9'/>
- <enumerator name='SPA_FEATURE_FS_SS_LIMIT' value='10'/>
- <enumerator name='SPA_FEATURE_LARGE_BLOCKS' value='11'/>
- <enumerator name='SPA_FEATURE_LARGE_DNODE' value='12'/>
- <enumerator name='SPA_FEATURE_SHA512' value='13'/>
- <enumerator name='SPA_FEATURE_SKEIN' value='14'/>
- <enumerator name='SPA_FEATURE_EDONR' value='15'/>
- <enumerator name='SPA_FEATURE_USEROBJ_ACCOUNTING' value='16'/>
- <enumerator name='SPA_FEATURE_ENCRYPTION' value='17'/>
- <enumerator name='SPA_FEATURE_PROJECT_QUOTA' value='18'/>
- <enumerator name='SPA_FEATURE_DEVICE_REMOVAL' value='19'/>
- <enumerator name='SPA_FEATURE_OBSOLETE_COUNTS' value='20'/>
- <enumerator name='SPA_FEATURE_POOL_CHECKPOINT' value='21'/>
- <enumerator name='SPA_FEATURE_SPACEMAP_V2' value='22'/>
- <enumerator name='SPA_FEATURE_ALLOCATION_CLASSES' value='23'/>
- <enumerator name='SPA_FEATURE_RESILVER_DEFER' value='24'/>
- <enumerator name='SPA_FEATURE_BOOKMARK_V2' value='25'/>
- <enumerator name='SPA_FEATURE_REDACTION_BOOKMARKS' value='26'/>
- <enumerator name='SPA_FEATURE_REDACTED_DATASETS' value='27'/>
- <enumerator name='SPA_FEATURE_BOOKMARK_WRITTEN' value='28'/>
- <enumerator name='SPA_FEATURE_LOG_SPACEMAP' value='29'/>
- <enumerator name='SPA_FEATURE_LIVELIST' value='30'/>
- <enumerator name='SPA_FEATURE_DEVICE_REBUILD' value='31'/>
- <enumerator name='SPA_FEATURE_ZSTD_COMPRESS' value='32'/>
- <enumerator name='SPA_FEATURE_DRAID' value='33'/>
- <enumerator name='SPA_FEATURES' value='34'/>
- </enum-decl>
- <typedef-decl name='spa_feature_t' type-id='type-id-214' id='type-id-210'/>
- <enum-decl name='zfeature_flags' id='type-id-215'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZFEATURE_FLAG_READONLY_COMPAT' value='1'/>
- <enumerator name='ZFEATURE_FLAG_MOS' value='2'/>
- <enumerator name='ZFEATURE_FLAG_ACTIVATE_ON_ENABLE' value='4'/>
- <enumerator name='ZFEATURE_FLAG_PER_DATASET' value='8'/>
+ <abi-instr version='1.0' address-size='64' path='libzfs_status.c' language='LANG_C99'>
+ <typedef-decl name='zpool_status_t' type-id='08f5ca1e' id='d3dd6294'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca1e'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_CACHE' value='0'/>
+ <enumerator name='ZPOOL_STATUS_MISSING_DEV_R' value='1'/>
+ <enumerator name='ZPOOL_STATUS_MISSING_DEV_NR' value='2'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_R' value='3'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_LABEL_NR' value='4'/>
+ <enumerator name='ZPOOL_STATUS_BAD_GUID_SUM' value='5'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_POOL' value='6'/>
+ <enumerator name='ZPOOL_STATUS_CORRUPT_DATA' value='7'/>
+ <enumerator name='ZPOOL_STATUS_FAILING_DEV' value='8'/>
+ <enumerator name='ZPOOL_STATUS_VERSION_NEWER' value='9'/>
+ <enumerator name='ZPOOL_STATUS_HOSTID_MISMATCH' value='10'/>
+ <enumerator name='ZPOOL_STATUS_HOSTID_ACTIVE' value='11'/>
+ <enumerator name='ZPOOL_STATUS_HOSTID_REQUIRED' value='12'/>
+ <enumerator name='ZPOOL_STATUS_IO_FAILURE_WAIT' value='13'/>
+ <enumerator name='ZPOOL_STATUS_IO_FAILURE_CONTINUE' value='14'/>
+ <enumerator name='ZPOOL_STATUS_IO_FAILURE_MMP' value='15'/>
+ <enumerator name='ZPOOL_STATUS_BAD_LOG' value='16'/>
+ <enumerator name='ZPOOL_STATUS_ERRATA' value='17'/>
+ <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_READ' value='18'/>
+ <enumerator name='ZPOOL_STATUS_UNSUP_FEAT_WRITE' value='19'/>
+ <enumerator name='ZPOOL_STATUS_FAULTED_DEV_R' value='20'/>
+ <enumerator name='ZPOOL_STATUS_FAULTED_DEV_NR' value='21'/>
+ <enumerator name='ZPOOL_STATUS_VERSION_OLDER' value='22'/>
+ <enumerator name='ZPOOL_STATUS_FEAT_DISABLED' value='23'/>
+ <enumerator name='ZPOOL_STATUS_RESILVERING' value='24'/>
+ <enumerator name='ZPOOL_STATUS_OFFLINE_DEV' value='25'/>
+ <enumerator name='ZPOOL_STATUS_REMOVED_DEV' value='26'/>
+ <enumerator name='ZPOOL_STATUS_REBUILDING' value='27'/>
+ <enumerator name='ZPOOL_STATUS_REBUILD_SCRUB' value='28'/>
+ <enumerator name='ZPOOL_STATUS_NON_NATIVE_ASHIFT' value='29'/>
+ <enumerator name='ZPOOL_STATUS_COMPATIBILITY_ERR' value='30'/>
+ <enumerator name='ZPOOL_STATUS_INCOMPATIBLE_FEAT' value='31'/>
+ <enumerator name='ZPOOL_STATUS_OK' value='32'/>
</enum-decl>
- <typedef-decl name='zfeature_flags_t' type-id='type-id-215' id='type-id-211'/>
- <enum-decl name='zfeature_type' id='type-id-216'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZFEATURE_TYPE_BOOLEAN' value='0'/>
- <enumerator name='ZFEATURE_TYPE_UINT64_ARRAY' value='1'/>
- <enumerator name='ZFEATURE_NUM_TYPES' value='2'/>
+ <typedef-decl name='zpool_errata_t' type-id='d9abbf54' id='688c495b'/>
+ <enum-decl name='zpool_errata' id='d9abbf54'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPOOL_ERRATA_NONE' value='0'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_2094_SCRUB' value='1'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY' value='2'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_6845_ENCRYPTION' value='3'/>
+ <enumerator name='ZPOOL_ERRATA_ZOL_8308_ENCRYPTION' value='4'/>
</enum-decl>
- <typedef-decl name='zfeature_type_t' type-id='type-id-216' id='type-id-212'/>
- <qualified-type-def type-id='type-id-210' const='yes' id='type-id-217'/>
- <pointer-type-def type-id='type-id-217' size-in-bits='64' id='type-id-213'/>
- <typedef-decl name='zfeature_info_t' type-id='type-id-209' id='type-id-218'/>
-
- <array-type-def dimensions='1' type-id='type-id-218' size-in-bits='15232' id='type-id-219'>
- <subrange length='34' type-id='type-id-24' id='type-id-220'/>
-
- </array-type-def>
- <var-decl name='spa_feature_table' type-id='type-id-219' mangled-name='spa_feature_table' visibility='default' elf-symbol-id='spa_feature_table'/>
- <function-decl name='zfeature_depends_on' mangled-name='zfeature_depends_on' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_depends_on'>
- <parameter type-id='type-id-210' name='fid'/>
- <parameter type-id='type-id-210' name='check'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <pointer-type-def type-id='type-id-210' size-in-bits='64' id='type-id-221'/>
- <function-decl name='zfeature_lookup_name' mangled-name='zfeature_lookup_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_name'>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-221' name='res'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfeature_lookup_guid' mangled-name='zfeature_lookup_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_lookup_guid'>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-221' name='res'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfeature_is_supported' mangled-name='zfeature_is_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_supported'>
- <parameter type-id='type-id-84' name='guid'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfeature_is_valid_guid' mangled-name='zfeature_is_valid_guid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfeature_is_valid_guid'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
+ <pointer-type-def type-id='688c495b' size-in-bits='64' id='cec6f2e4'/>
+ <function-decl name='zpool_import_status' mangled-name='zpool_import_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_import_status'>
+ <parameter type-id='5ce45b60' name='config'/>
+ <parameter type-id='9b23c9ad' name='msgid'/>
+ <parameter type-id='cec6f2e4' name='errata'/>
+ <return type-id='d3dd6294'/>
</function-decl>
- <function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mod_supported'>
- <parameter type-id='type-id-84' name='scope'/>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
+ <function-decl name='zpool_get_status' mangled-name='zpool_get_status' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_status'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='9b23c9ad' name='msgid'/>
+ <parameter type-id='cec6f2e4' name='errata'/>
+ <return type-id='d3dd6294'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_comutil.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
-
- <array-type-def dimensions='1' type-id='type-id-84' size-in-bits='2624' id='type-id-222'>
- <subrange length='41' type-id='type-id-24' id='type-id-223'/>
-
+ <abi-instr version='1.0' address-size='64' path='libzfs_util.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='95e97e5e' size-in-bits='192' id='e41bdf22'>
+ <subrange length='6' type-id='7359adad' id='52fa524b'/>
</array-type-def>
- <var-decl name='zfs_history_event_names' type-id='type-id-222' mangled-name='zfs_history_event_names' visibility='default' elf-symbol-id='zfs_history_event_names'/>
- <function-decl name='zfs_dataset_name_hidden' mangled-name='zfs_dataset_name_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dataset_name_hidden'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_spa_version_map' mangled-name='zfs_spa_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_spa_version_map'>
- <parameter type-id='type-id-2' name='zpl_version'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_zpl_version_map' mangled-name='zfs_zpl_version_map' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_zpl_version_map'>
- <parameter type-id='type-id-2' name='zpl_version'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <class-decl name='zpool_load_policy' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-224'>
+ <type-decl name='variadic parameter type' id='2c1145c5'/>
+ <array-type-def dimensions='1' type-id='19cefcee' size-in-bits='160' alignment-in-bits='32' id='3fcf57d2'>
+ <subrange length='5' type-id='7359adad' id='53010e10'/>
+ </array-type-def>
+ <typedef-decl name='zprop_func' type-id='2e711a2a' id='1ec3747a'/>
+ <typedef-decl name='zprop_get_cbdata_t' type-id='f3d3c319' id='f3d87113'/>
+ <class-decl name='zprop_get_cbdata' size-in-bits='640' is-struct='yes' visibility='default' id='f3d3c319'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zlp_rewind' type-id='type-id-28' visibility='default'/>
+ <var-decl name='cb_sources' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zlp_maxmeta' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zlp_maxdata' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='cb_columns' type-id='3fcf57d2' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zlp_txg' type-id='type-id-7' visibility='default'/>
+ <var-decl name='cb_colwidths' type-id='e41bdf22' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='cb_scripted' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='416'>
+ <var-decl name='cb_literal' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='cb_first' type-id='c19b74c3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='cb_proplist' type-id='3a9b2288' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='cb_type' type-id='2e45de5d' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='zpool_load_policy_t' type-id='type-id-224' id='type-id-225'/>
- <pointer-type-def type-id='type-id-225' size-in-bits='64' id='type-id-226'/>
- <function-decl name='zpool_get_load_policy' mangled-name='zpool_get_load_policy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_load_policy'>
- <parameter type-id='type-id-19' name='nvl'/>
- <parameter type-id='type-id-226' name='zlpp'/>
- <return type-id='type-id-1'/>
+ <typedef-decl name='zfs_get_column_t' type-id='08f5ca1f' id='19cefcee'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca1f'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='GET_COL_NONE' value='0'/>
+ <enumerator name='GET_COL_NAME' value='1'/>
+ <enumerator name='GET_COL_PROPERTY' value='2'/>
+ <enumerator name='GET_COL_VALUE' value='3'/>
+ <enumerator name='GET_COL_RECVD' value='4'/>
+ <enumerator name='GET_COL_SOURCE' value='5'/>
+ </enum-decl>
+ <pointer-type-def type-id='9b23c9ad' size-in-bits='64' id='c0563f85'/>
+ <pointer-type-def type-id='c70fa2e8' size-in-bits='64' id='2e711a2a'/>
+ <pointer-type-def type-id='f3d87113' size-in-bits='64' id='0d2a0670'/>
+ <function-decl name='printf_color' mangled-name='printf_color' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='printf_color'>
+ <parameter type-id='26a90f95' name='color'/>
+ <parameter type-id='26a90f95' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_special_devs' mangled-name='zfs_special_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_special_devs'>
- <parameter type-id='type-id-19' name='nv'/>
- <parameter type-id='type-id-14' name='type'/>
- <return type-id='type-id-9'/>
+ <function-decl name='zfs_version_print' mangled-name='zfs_version_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_print'>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_allocatable_devs' mangled-name='zfs_allocatable_devs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_allocatable_devs'>
- <parameter type-id='type-id-19' name='nv'/>
- <return type-id='type-id-9'/>
+ <function-decl name='zfs_version_userland' mangled-name='zfs_version_userland' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_userland'>
+ <parameter type-id='26a90f95' name='version'/>
+ <parameter type-id='95e97e5e' name='len'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zprop_iter' mangled-name='zprop_iter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter'>
+ <parameter type-id='1ec3747a' name='func'/>
+ <parameter type-id='eaa32e2f' name='cb'/>
+ <parameter type-id='c19b74c3' name='show_all'/>
+ <parameter type-id='c19b74c3' name='ordered'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zprop_free_list' mangled-name='zprop_free_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_free_list'>
+ <parameter type-id='3a9b2288' name='pl'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zprop_get_list' mangled-name='zprop_get_list' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_get_list'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='26a90f95' name='props'/>
+ <parameter type-id='e4378506' name='listp'/>
+ <parameter type-id='2e45de5d' name='type'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zprop_print_one_property' mangled-name='zprop_print_one_property' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_print_one_property'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='0d2a0670' name='cbp'/>
+ <parameter type-id='80f4b756' name='propname'/>
+ <parameter type-id='80f4b756' name='value'/>
+ <parameter type-id='a2256d42' name='sourcetype'/>
+ <parameter type-id='80f4b756' name='source'/>
+ <parameter type-id='80f4b756' name='recvd_value'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_path_to_zhandle' mangled-name='zfs_path_to_zhandle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_path_to_zhandle'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='2e45de5d' name='argtype'/>
+ <return type-id='9200a744'/>
+ </function-decl>
+ <function-decl name='zfs_get_pool_handle' mangled-name='zfs_get_pool_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_pool_handle'>
+ <parameter type-id='fcd57163' name='zhp'/>
+ <return type-id='4c81de99'/>
+ </function-decl>
+ <function-decl name='zfs_get_handle' mangled-name='zfs_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_handle'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <return type-id='b0382bb3'/>
+ </function-decl>
+ <function-decl name='zpool_get_handle' mangled-name='zpool_get_handle' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_get_handle'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <return type-id='b0382bb3'/>
</function-decl>
- <function-decl name='nvpair_value_uint32' mangled-name='nvpair_value_uint32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_fini'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_init'>
+ <return type-id='b0382bb3'/>
+ </function-decl>
+ <function-decl name='libzfs_envvar_is_set' mangled-name='libzfs_envvar_is_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_envvar_is_set'>
+ <parameter type-id='26a90f95' name='envvar'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='libzfs_free_str_array' mangled-name='libzfs_free_str_array' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_free_str_array'>
+ <parameter type-id='9b23c9ad' name='strs'/>
+ <parameter type-id='95e97e5e' name='count'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='libzfs_run_process_get_stdout_nopath' mangled-name='libzfs_run_process_get_stdout_nopath' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout_nopath'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='9b23c9ad' name='argv'/>
+ <parameter type-id='9b23c9ad' name='env'/>
+ <parameter type-id='c0563f85' name='lines'/>
+ <parameter type-id='7292109c' name='lines_cnt'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='libzfs_run_process_get_stdout' mangled-name='libzfs_run_process_get_stdout' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process_get_stdout'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='9b23c9ad' name='argv'/>
+ <parameter type-id='9b23c9ad' name='env'/>
+ <parameter type-id='c0563f85' name='lines'/>
+ <parameter type-id='7292109c' name='lines_cnt'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='libzfs_run_process' mangled-name='libzfs_run_process' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_run_process'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='9b23c9ad' name='argv'/>
+ <parameter type-id='95e97e5e' name='flags'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='libzfs_print_on_error' mangled-name='libzfs_print_on_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_print_on_error'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='c19b74c3' name='printerr'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_standard_error' mangled-name='zfs_standard_error' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_standard_error'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='95e97e5e' name='error'/>
+ <parameter type-id='80f4b756' name='msg'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='libzfs_error_action' mangled-name='libzfs_error_action' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_action'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <return type-id='80f4b756'/>
+ </function-decl>
+ <function-decl name='libzfs_errno' mangled-name='libzfs_errno' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_errno'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_description'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <return type-id='80f4b756'/>
+ </function-decl>
+ <function-decl name='zfs_nicestrtonum' mangled-name='zfs_nicestrtonum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicestrtonum'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='80f4b756' name='value'/>
+ <parameter type-id='5d6479ae' name='num'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='color_start' mangled-name='color_start' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_start'>
+ <parameter type-id='26a90f95' name='color'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='color_end' mangled-name='color_end' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='color_end'>
+ <return type-id='48b5725f'/>
</function-decl>
+ <function-type size-in-bits='64' id='c70fa2e8'>
+ <parameter type-id='95e97e5e'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_deleg.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='zfs_deleg_perm_tab' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-227'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='z_perm' type-id='type-id-14' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='z_note' type-id='type-id-228' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-229'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZFS_DELEG_NOTE_CREATE' value='0'/>
- <enumerator name='ZFS_DELEG_NOTE_DESTROY' value='1'/>
- <enumerator name='ZFS_DELEG_NOTE_SNAPSHOT' value='2'/>
- <enumerator name='ZFS_DELEG_NOTE_ROLLBACK' value='3'/>
- <enumerator name='ZFS_DELEG_NOTE_CLONE' value='4'/>
- <enumerator name='ZFS_DELEG_NOTE_PROMOTE' value='5'/>
- <enumerator name='ZFS_DELEG_NOTE_RENAME' value='6'/>
- <enumerator name='ZFS_DELEG_NOTE_SEND' value='7'/>
- <enumerator name='ZFS_DELEG_NOTE_RECEIVE' value='8'/>
- <enumerator name='ZFS_DELEG_NOTE_ALLOW' value='9'/>
- <enumerator name='ZFS_DELEG_NOTE_USERPROP' value='10'/>
- <enumerator name='ZFS_DELEG_NOTE_MOUNT' value='11'/>
- <enumerator name='ZFS_DELEG_NOTE_SHARE' value='12'/>
- <enumerator name='ZFS_DELEG_NOTE_USERQUOTA' value='13'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPQUOTA' value='14'/>
- <enumerator name='ZFS_DELEG_NOTE_USERUSED' value='15'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPUSED' value='16'/>
- <enumerator name='ZFS_DELEG_NOTE_USEROBJQUOTA' value='17'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPOBJQUOTA' value='18'/>
- <enumerator name='ZFS_DELEG_NOTE_USEROBJUSED' value='19'/>
- <enumerator name='ZFS_DELEG_NOTE_GROUPOBJUSED' value='20'/>
- <enumerator name='ZFS_DELEG_NOTE_HOLD' value='21'/>
- <enumerator name='ZFS_DELEG_NOTE_RELEASE' value='22'/>
- <enumerator name='ZFS_DELEG_NOTE_DIFF' value='23'/>
- <enumerator name='ZFS_DELEG_NOTE_BOOKMARK' value='24'/>
- <enumerator name='ZFS_DELEG_NOTE_LOAD_KEY' value='25'/>
- <enumerator name='ZFS_DELEG_NOTE_CHANGE_KEY' value='26'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTUSED' value='27'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTQUOTA' value='28'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJUSED' value='29'/>
- <enumerator name='ZFS_DELEG_NOTE_PROJECTOBJQUOTA' value='30'/>
- <enumerator name='ZFS_DELEG_NOTE_NONE' value='31'/>
- </enum-decl>
- <typedef-decl name='zfs_deleg_note_t' type-id='type-id-229' id='type-id-228'/>
- <typedef-decl name='zfs_deleg_perm_tab_t' type-id='type-id-227' id='type-id-230'/>
-
- <array-type-def dimensions='1' type-id='type-id-230' size-in-bits='128' id='type-id-231'>
- <subrange length='1' id='type-id-232'/>
-
- </array-type-def>
- <var-decl name='zfs_deleg_perm_tab' type-id='type-id-231' mangled-name='zfs_deleg_perm_tab' visibility='default' elf-symbol-id='zfs_deleg_perm_tab'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-233'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZFS_DELEG_WHO_UNKNOWN' value='0'/>
- <enumerator name='ZFS_DELEG_USER' value='117'/>
- <enumerator name='ZFS_DELEG_USER_SETS' value='85'/>
- <enumerator name='ZFS_DELEG_GROUP' value='103'/>
- <enumerator name='ZFS_DELEG_GROUP_SETS' value='71'/>
- <enumerator name='ZFS_DELEG_EVERYONE' value='101'/>
- <enumerator name='ZFS_DELEG_EVERYONE_SETS' value='69'/>
- <enumerator name='ZFS_DELEG_CREATE' value='99'/>
- <enumerator name='ZFS_DELEG_CREATE_SETS' value='67'/>
- <enumerator name='ZFS_DELEG_NAMED_SET' value='115'/>
- <enumerator name='ZFS_DELEG_NAMED_SET_SETS' value='83'/>
- </enum-decl>
- <typedef-decl name='zfs_deleg_who_type_t' type-id='type-id-233' id='type-id-234'/>
- <function-decl name='zfs_deleg_whokey' mangled-name='zfs_deleg_whokey' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_whokey'>
- <parameter type-id='type-id-14' name='attr'/>
- <parameter type-id='type-id-234' name='type'/>
- <parameter type-id='type-id-23' name='inheritchr'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-1'/>
+ <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_mount_os.c' language='LANG_C99'>
+ <pointer-type-def type-id='7359adad' size-in-bits='64' id='1d2c2b85'/>
+ <function-decl name='zpool_disable_volume_os' mangled-name='zpool_disable_volume_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_volume_os'>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_deleg_verify_nvlist' mangled-name='zfs_deleg_verify_nvlist' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_verify_nvlist'>
- <parameter type-id='type-id-19' name='nvp'/>
- <return type-id='type-id-2'/>
+ <function-decl name='zpool_disable_datasets_os' mangled-name='zpool_disable_datasets_os' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_disable_datasets_os'>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_deleg_canonicalize_perm' mangled-name='zfs_deleg_canonicalize_perm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_deleg_canonicalize_perm'>
- <parameter type-id='type-id-84' name='perm'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_mount_delegation_check' mangled-name='zfs_mount_delegation_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_mount_delegation_check'>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zfs_adjust_mount_options' mangled-name='zfs_adjust_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_adjust_mount_options'>
+ <parameter type-id='9200a744' name='zhp'/>
+ <parameter type-id='80f4b756' name='mntpoint'/>
+ <parameter type-id='26a90f95' name='mntopts'/>
+ <parameter type-id='26a90f95' name='mtabopt'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <function-decl name='zfs_parse_mount_options' mangled-name='zfs_parse_mount_options' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_parse_mount_options'>
+ <parameter type-id='26a90f95' name='mntopts'/>
+ <parameter type-id='1d2c2b85' name='mntflags'/>
+ <parameter type-id='1d2c2b85' name='zfsflags'/>
+ <parameter type-id='95e97e5e' name='sloppy'/>
+ <parameter type-id='26a90f95' name='badopt'/>
+ <parameter type-id='26a90f95' name='mtabopt'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_pool_os.c' language='LANG_C99'>
+ <function-decl name='zpool_label_disk' mangled-name='zpool_label_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='4c81de99' name='zhp'/>
+ <parameter type-id='80f4b756' name='name'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='zio_abd_checksum_func' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-235'>
+ <abi-instr version='1.0' address-size='64' path='os/linux/libzfs_util_os.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32768' id='d16c6df4'>
+ <subrange length='4096' type-id='7359adad' id='bc1b5ddc'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='65536' id='163f6aa5'>
+ <subrange length='8192' type-id='7359adad' id='c88f397d'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='128' id='c1c22e6c'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='24' id='d3490169'>
+ <subrange length='3' type-id='7359adad' id='56f209d2'/>
+ </array-type-def>
+ <typedef-decl name='zfs_cmd_t' type-id='3522cd69' id='a5559cdd'/>
+ <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='3522cd69'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='acf_init' type-id='type-id-236' visibility='default'/>
+ <var-decl name='zc_name' type-id='d16c6df4' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='acf_fini' type-id='type-id-237' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32768'>
+ <var-decl name='zc_nvlist_src' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='acf_iter' type-id='type-id-238' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32832'>
+ <var-decl name='zc_nvlist_src_size' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
- <class-decl name='zio_abd_checksum_data' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-239'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='acd_byteorder' type-id='type-id-240' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32896'>
+ <var-decl name='zc_nvlist_dst' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='acd_ctx' type-id='type-id-241' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32960'>
+ <var-decl name='zc_nvlist_dst_size' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='acd_zcp' type-id='type-id-242' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33024'>
+ <var-decl name='zc_nvlist_dst_filled' type-id='c19b74c3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='acd_private' type-id='type-id-13' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33056'>
+ <var-decl name='zc_pad2' type-id='95e97e5e' visibility='default'/>
</data-member>
- </class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-243'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='ZIO_CHECKSUM_NATIVE' value='0'/>
- <enumerator name='ZIO_CHECKSUM_BYTESWAP' value='1'/>
- </enum-decl>
- <typedef-decl name='zio_byteorder_t' type-id='type-id-243' id='type-id-240'/>
- <union-decl name='fletcher_4_ctx' size-in-bits='2048' visibility='default' id='type-id-244'>
- <data-member access='private'>
- <var-decl name='scalar' type-id='type-id-245' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33088'>
+ <var-decl name='zc_history' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='private'>
- <var-decl name='superscalar' type-id='type-id-246' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33152'>
+ <var-decl name='zc_value' type-id='163f6aa5' visibility='default'/>
</data-member>
- <data-member access='private'>
- <var-decl name='sse' type-id='type-id-247' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='98688'>
+ <var-decl name='zc_string' type-id='d1617432' visibility='default'/>
</data-member>
- <data-member access='private'>
- <var-decl name='avx' type-id='type-id-248' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100736'>
+ <var-decl name='zc_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='private'>
- <var-decl name='avx512' type-id='type-id-249' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100800'>
+ <var-decl name='zc_nvlist_conf' type-id='9c313c2d' visibility='default'/>
</data-member>
- </union-decl>
- <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-250'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_word' type-id='type-id-251' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100864'>
+ <var-decl name='zc_nvlist_conf_size' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-7' size-in-bits='256' id='type-id-251'>
- <subrange length='4' type-id='type-id-24' id='type-id-252'/>
-
- </array-type-def>
- <typedef-decl name='zio_cksum_t' type-id='type-id-250' id='type-id-245'/>
- <class-decl name='zfs_fletcher_superscalar' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-253'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-251' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100928'>
+ <var-decl name='zc_cookie' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='zfs_fletcher_superscalar_t' type-id='type-id-253' id='type-id-254'/>
-
- <array-type-def dimensions='1' type-id='type-id-254' size-in-bits='1024' id='type-id-246'>
- <subrange length='4' type-id='type-id-24' id='type-id-252'/>
-
- </array-type-def>
- <class-decl name='zfs_fletcher_sse' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-255'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-206' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='100992'>
+ <var-decl name='zc_objset_type' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='zfs_fletcher_sse_t' type-id='type-id-255' id='type-id-256'/>
-
- <array-type-def dimensions='1' type-id='type-id-256' size-in-bits='512' id='type-id-247'>
- <subrange length='4' type-id='type-id-24' id='type-id-252'/>
-
- </array-type-def>
- <class-decl name='zfs_fletcher_avx' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-257'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-251' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101056'>
+ <var-decl name='zc_perm_action' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='zfs_fletcher_avx_t' type-id='type-id-257' id='type-id-258'/>
-
- <array-type-def dimensions='1' type-id='type-id-258' size-in-bits='1024' id='type-id-248'>
- <subrange length='4' type-id='type-id-24' id='type-id-252'/>
-
- </array-type-def>
- <class-decl name='zfs_fletcher_avx512' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-259'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='v' type-id='type-id-260' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101120'>
+ <var-decl name='zc_history_len' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-7' size-in-bits='512' id='type-id-260'>
- <subrange length='8' type-id='type-id-24' id='type-id-261'/>
-
- </array-type-def>
- <typedef-decl name='zfs_fletcher_avx512_t' type-id='type-id-259' id='type-id-262'/>
-
- <array-type-def dimensions='1' type-id='type-id-262' size-in-bits='2048' id='type-id-249'>
- <subrange length='4' type-id='type-id-24' id='type-id-252'/>
-
- </array-type-def>
- <typedef-decl name='fletcher_4_ctx_t' type-id='type-id-244' id='type-id-263'/>
- <pointer-type-def type-id='type-id-263' size-in-bits='64' id='type-id-241'/>
- <pointer-type-def type-id='type-id-245' size-in-bits='64' id='type-id-242'/>
- <typedef-decl name='zio_abd_checksum_data_t' type-id='type-id-239' id='type-id-264'/>
- <pointer-type-def type-id='type-id-264' size-in-bits='64' id='type-id-265'/>
- <typedef-decl name='zio_abd_checksum_init_t' type-id='type-id-266' id='type-id-267'/>
- <pointer-type-def type-id='type-id-267' size-in-bits='64' id='type-id-236'/>
- <typedef-decl name='zio_abd_checksum_fini_t' type-id='type-id-266' id='type-id-268'/>
- <pointer-type-def type-id='type-id-268' size-in-bits='64' id='type-id-237'/>
- <typedef-decl name='zio_abd_checksum_iter_t' type-id='type-id-269' id='type-id-270'/>
- <pointer-type-def type-id='type-id-270' size-in-bits='64' id='type-id-238'/>
- <qualified-type-def type-id='type-id-235' const='yes' id='type-id-271'/>
- <typedef-decl name='zio_abd_checksum_func_t' type-id='type-id-271' id='type-id-272'/>
- <var-decl name='fletcher_4_abd_ops' type-id='type-id-272' mangled-name='fletcher_4_abd_ops' visibility='default' elf-symbol-id='fletcher_4_abd_ops'/>
- <function-decl name='fletcher_4_incremental_byteswap' mangled-name='fletcher_4_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_byteswap'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-18' name='size'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='fletcher_4_native_varsize' mangled-name='fletcher_4_native_varsize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native_varsize'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-7' name='size'/>
- <parameter type-id='type-id-242' name='zcp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fletcher_4_impl_set' mangled-name='fletcher_4_impl_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_impl_set'>
- <parameter type-id='type-id-84' name='val'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='fletcher_2_byteswap' mangled-name='fletcher_2_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_byteswap'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-7' name='size'/>
- <parameter type-id='type-id-13' name='ctx_template'/>
- <parameter type-id='type-id-242' name='zcp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fletcher_2_incremental_byteswap' mangled-name='fletcher_2_incremental_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_byteswap'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-18' name='size'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='fletcher_2_native' mangled-name='fletcher_2_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_native'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-7' name='size'/>
- <parameter type-id='type-id-13' name='ctx_template'/>
- <parameter type-id='type-id-242' name='zcp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fletcher_2_incremental_native' mangled-name='fletcher_2_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_2_incremental_native'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-18' name='size'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='fletcher_init' mangled-name='fletcher_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_init'>
- <parameter type-id='type-id-242' name='zcp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fletcher_4_native' mangled-name='fletcher_4_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_native'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-7' name='size'/>
- <parameter type-id='type-id-13' name='ctx_template'/>
- <parameter type-id='type-id-242' name='zcp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fletcher_4_byteswap' mangled-name='fletcher_4_byteswap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_byteswap'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-7' name='size'/>
- <parameter type-id='type-id-13' name='ctx_template'/>
- <parameter type-id='type-id-242' name='zcp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fletcher_4_incremental_native' mangled-name='fletcher_4_incremental_native' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='fletcher_4_incremental_native'>
- <parameter type-id='type-id-13' name='buf'/>
- <parameter type-id='type-id-18' name='size'/>
- <parameter type-id='type-id-13' name='data'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-269'>
- <parameter type-id='type-id-13'/>
- <parameter type-id='type-id-18'/>
- <parameter type-id='type-id-13'/>
- <return type-id='type-id-2'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-266'>
- <parameter type-id='type-id-265'/>
- <return type-id='type-id-1'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_avx512.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <class-decl name='fletcher_4_func' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-273'>
+ <data-member access='public' layout-offset-in-bits='101184'>
+ <var-decl name='zc_history_offset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101248'>
+ <var-decl name='zc_obj' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101312'>
+ <var-decl name='zc_iflags' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101376'>
+ <var-decl name='zc_share' type-id='ee5cec36' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101632'>
+ <var-decl name='zc_objset_stats' type-id='b2c14f17' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='103936'>
+ <var-decl name='zc_begin_record' type-id='09fcdc01' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='106368'>
+ <var-decl name='zc_inject_record' type-id='a4301ca6' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109184'>
+ <var-decl name='zc_defer_destroy' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109216'>
+ <var-decl name='zc_flags' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109248'>
+ <var-decl name='zc_action_handle' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109312'>
+ <var-decl name='zc_cleanup_fd' type-id='95e97e5e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109344'>
+ <var-decl name='zc_simple' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109352'>
+ <var-decl name='zc_pad' type-id='d3490169' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109376'>
+ <var-decl name='zc_sendobj' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109440'>
+ <var-decl name='zc_fromobj' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109504'>
+ <var-decl name='zc_createtxg' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109568'>
+ <var-decl name='zc_stat' type-id='0371a9c7' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='109888'>
+ <var-decl name='zc_zoneid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zfs_share_t' type-id='feb6f2da' id='ee5cec36'/>
+ <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='feb6f2da'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='z_exportdata' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='z_sharedata' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='z_sharetype' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='z_sharemax' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='09fcdc01'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='init_native' type-id='type-id-274' visibility='default'/>
+ <var-decl name='drr_magic' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='fini_native' type-id='type-id-275' visibility='default'/>
+ <var-decl name='drr_versioninfo' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='compute_native' type-id='type-id-276' visibility='default'/>
+ <var-decl name='drr_creation_time' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='init_byteswap' type-id='type-id-274' visibility='default'/>
+ <var-decl name='drr_type' type-id='230f1e16' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='drr_flags' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='fini_byteswap' type-id='type-id-275' visibility='default'/>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='compute_byteswap' type-id='type-id-276' visibility='default'/>
+ <var-decl name='drr_fromguid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='valid' type-id='type-id-277' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='name' type-id='type-id-84' visibility='default'/>
+ <var-decl name='drr_toname' type-id='d1617432' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-278' size-in-bits='64' id='type-id-279'/>
- <typedef-decl name='fletcher_4_init_f' type-id='type-id-279' id='type-id-274'/>
- <pointer-type-def type-id='type-id-280' size-in-bits='64' id='type-id-281'/>
- <typedef-decl name='fletcher_4_fini_f' type-id='type-id-281' id='type-id-275'/>
- <pointer-type-def type-id='type-id-282' size-in-bits='64' id='type-id-283'/>
- <typedef-decl name='fletcher_4_compute_f' type-id='type-id-283' id='type-id-276'/>
- <pointer-type-def type-id='type-id-284' size-in-bits='64' id='type-id-277'/>
- <typedef-decl name='fletcher_4_ops_t' type-id='type-id-273' id='type-id-285'/>
- <qualified-type-def type-id='type-id-285' const='yes' id='type-id-286'/>
- <var-decl name='fletcher_4_avx512f_ops' type-id='type-id-286' mangled-name='fletcher_4_avx512f_ops' visibility='default' elf-symbol-id='fletcher_4_avx512f_ops'/>
- <var-decl name='fletcher_4_avx512bw_ops' type-id='type-id-286' mangled-name='fletcher_4_avx512bw_ops' visibility='default' elf-symbol-id='fletcher_4_avx512bw_ops'/>
- <function-type size-in-bits='64' id='type-id-284'>
- <return type-id='type-id-9'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-278'>
- <parameter type-id='type-id-241'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-282'>
- <parameter type-id='type-id-241'/>
- <parameter type-id='type-id-13'/>
- <parameter type-id='type-id-7'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-280'>
- <parameter type-id='type-id-241'/>
- <parameter type-id='type-id-242'/>
- <return type-id='type-id-1'/>
- </function-type>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_intel.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_avx2_ops' type-id='type-id-286' mangled-name='fletcher_4_avx2_ops' visibility='default' elf-symbol-id='fletcher_4_avx2_ops'/>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_sse.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_sse2_ops' type-id='type-id-286' mangled-name='fletcher_4_sse2_ops' visibility='default' elf-symbol-id='fletcher_4_sse2_ops'/>
- <var-decl name='fletcher_4_ssse3_ops' type-id='type-id-286' mangled-name='fletcher_4_ssse3_ops' visibility='default' elf-symbol-id='fletcher_4_ssse3_ops'/>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_superscalar_ops' type-id='type-id-286' mangled-name='fletcher_4_superscalar_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar_ops'/>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_fletcher_superscalar4.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='fletcher_4_superscalar4_ops' type-id='type-id-286' mangled-name='fletcher_4_superscalar4_ops' visibility='default' elf-symbol-id='fletcher_4_superscalar4_ops'/>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_namecheck.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <var-decl name='zfs_max_dataset_nesting' type-id='type-id-2' mangled-name='zfs_max_dataset_nesting' visibility='default' elf-symbol-id='zfs_max_dataset_nesting'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-287'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='NAME_ERR_LEADING_SLASH' value='0'/>
- <enumerator name='NAME_ERR_EMPTY_COMPONENT' value='1'/>
- <enumerator name='NAME_ERR_TRAILING_SLASH' value='2'/>
- <enumerator name='NAME_ERR_INVALCHAR' value='3'/>
- <enumerator name='NAME_ERR_MULTIPLE_DELIMITERS' value='4'/>
- <enumerator name='NAME_ERR_NOLETTER' value='5'/>
- <enumerator name='NAME_ERR_RESERVED' value='6'/>
- <enumerator name='NAME_ERR_DISKLIKE' value='7'/>
- <enumerator name='NAME_ERR_TOOLONG' value='8'/>
- <enumerator name='NAME_ERR_SELF_REF' value='9'/>
- <enumerator name='NAME_ERR_PARENT_REF' value='10'/>
- <enumerator name='NAME_ERR_NO_AT' value='11'/>
- <enumerator name='NAME_ERR_NO_POUND' value='12'/>
- </enum-decl>
- <typedef-decl name='namecheck_err_t' type-id='type-id-287' id='type-id-288'/>
- <pointer-type-def type-id='type-id-288' size-in-bits='64' id='type-id-289'/>
- <function-decl name='pool_namecheck' mangled-name='pool_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='pool_namecheck'>
- <parameter type-id='type-id-84' name='pool'/>
- <parameter type-id='type-id-289' name='why'/>
- <parameter type-id='type-id-14' name='what'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='mountpoint_namecheck' mangled-name='mountpoint_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mountpoint_namecheck'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-289' name='why'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='snapshot_namecheck' mangled-name='snapshot_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='snapshot_namecheck'>
- <parameter type-id='type-id-84' name='pool'/>
- <parameter type-id='type-id-289' name='why'/>
- <parameter type-id='type-id-14' name='what'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='bookmark_namecheck' mangled-name='bookmark_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='bookmark_namecheck'>
- <parameter type-id='type-id-84' name='pool'/>
- <parameter type-id='type-id-289' name='why'/>
- <parameter type-id='type-id-14' name='what'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='dataset_namecheck' mangled-name='dataset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_namecheck'>
- <parameter type-id='type-id-84' name='pool'/>
- <parameter type-id='type-id-289' name='why'/>
- <parameter type-id='type-id-14' name='what'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='dataset_nestcheck' mangled-name='dataset_nestcheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='dataset_nestcheck'>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='permset_namecheck' mangled-name='permset_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='permset_namecheck'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-289' name='why'/>
- <parameter type-id='type-id-14' name='what'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='get_dataset_depth' mangled-name='get_dataset_depth' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_dataset_depth'>
- <parameter type-id='type-id-84' name='path'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_component_namecheck' mangled-name='zfs_component_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_component_namecheck'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-289' name='why'/>
- <parameter type-id='type-id-14' name='what'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='entity_namecheck' mangled-name='entity_namecheck' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='entity_namecheck'>
- <parameter type-id='type-id-84' name='path'/>
- <parameter type-id='type-id-289' name='why'/>
- <parameter type-id='type-id-14' name='what'/>
- <return type-id='type-id-2'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zfs_prop.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
-
- <array-type-def dimensions='1' type-id='type-id-84' size-in-bits='768' id='type-id-290'>
- <subrange length='12' type-id='type-id-24' id='type-id-291'/>
-
- </array-type-def>
- <var-decl name='zfs_userquota_prop_prefixes' type-id='type-id-290' mangled-name='zfs_userquota_prop_prefixes' visibility='default' elf-symbol-id='zfs_userquota_prop_prefixes'/>
- <function-decl name='zfs_prop_align_right' mangled-name='zfs_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_align_right'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_column_name' mangled-name='zfs_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_column_name'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_prop_is_string' mangled-name='zfs_prop_is_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_is_string'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_prop_values' mangled-name='zfs_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_values'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_prop_valid_keylocation' mangled-name='zfs_prop_valid_keylocation' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_keylocation'>
- <parameter type-id='type-id-84' name='str'/>
- <parameter type-id='type-id-9' name='encrypted'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_encryption_key_param' mangled-name='zfs_prop_encryption_key_param' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_encryption_key_param'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_inheritable' mangled-name='zfs_prop_inheritable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_inheritable'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_to_name' mangled-name='zfs_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_to_name'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_prop_default_numeric' mangled-name='zfs_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_numeric'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zfs_prop_default_string' mangled-name='zfs_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_default_string'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_prop_setonce' mangled-name='zfs_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_setonce'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_visible' mangled-name='zfs_prop_visible' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_visible'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_readonly' mangled-name='zfs_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_readonly'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-292'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='PROP_TYPE_NUMBER' value='0'/>
- <enumerator name='PROP_TYPE_STRING' value='1'/>
- <enumerator name='PROP_TYPE_INDEX' value='2'/>
- </enum-decl>
- <typedef-decl name='zprop_type_t' type-id='type-id-292' id='type-id-293'/>
- <function-decl name='zfs_prop_get_type' mangled-name='zfs_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_type'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-293'/>
- </function-decl>
- <function-decl name='zfs_prop_valid_for_type' mangled-name='zfs_prop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_valid_for_type'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-66' name='types'/>
- <parameter type-id='type-id-9' name='headcheck'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_random_value' mangled-name='zfs_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_random_value'>
- <parameter type-id='type-id-110' name='prop'/>
- <parameter type-id='type-id-7' name='seed'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <pointer-type-def type-id='type-id-84' size-in-bits='64' id='type-id-294'/>
- <function-decl name='zfs_prop_index_to_string' mangled-name='zfs_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_index_to_string'>
- <parameter type-id='type-id-110' name='prop'/>
- <parameter type-id='type-id-7' name='index'/>
- <parameter type-id='type-id-294' name='string'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_prop_string_to_index' mangled-name='zfs_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_string_to_index'>
- <parameter type-id='type-id-110' name='prop'/>
- <parameter type-id='type-id-84' name='string'/>
- <parameter type-id='type-id-108' name='index'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zfs_prop_written' mangled-name='zfs_prop_written' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_written'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_userquota' mangled-name='zfs_prop_userquota' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_userquota'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_prop_user' mangled-name='zfs_prop_user' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_user'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zfs_name_to_prop' mangled-name='zfs_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_name_to_prop'>
- <parameter type-id='type-id-84' name='propname'/>
- <return type-id='type-id-110'/>
- </function-decl>
- <function-decl name='zfs_prop_delegatable' mangled-name='zfs_prop_delegatable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_delegatable'>
- <parameter type-id='type-id-110' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <class-decl name='__anonymous_struct__' size-in-bits='704' is-struct='yes' is-anonymous='yes' naming-typedef-id='type-id-295' visibility='default' id='type-id-296'>
+ <typedef-decl name='zinject_record_t' type-id='3216f820' id='a4301ca6'/>
+ <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='3216f820'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pd_name' type-id='type-id-84' visibility='default'/>
+ <var-decl name='zi_objset' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pd_propnum' type-id='type-id-2' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='pd_proptype' type-id='type-id-293' visibility='default'/>
+ <var-decl name='zi_object' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='pd_strdefault' type-id='type-id-84' visibility='default'/>
+ <var-decl name='zi_start' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='pd_numdefault' type-id='type-id-7' visibility='default'/>
+ <var-decl name='zi_end' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='pd_attr' type-id='type-id-297' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='pd_types' type-id='type-id-2' visibility='default'/>
+ <var-decl name='zi_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='pd_values' type-id='type-id-84' visibility='default'/>
+ <var-decl name='zi_level' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='zi_error' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='pd_colname' type-id='type-id-84' visibility='default'/>
+ <var-decl name='zi_type' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='pd_rightalign' type-id='type-id-9' visibility='default'/>
+ <var-decl name='zi_freq' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='pd_visible' type-id='type-id-9' visibility='default'/>
+ <var-decl name='zi_failfast' type-id='8f92235e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='pd_zfs_mod_supported' type-id='type-id-9' visibility='default'/>
+ <var-decl name='zi_func' type-id='d1617432' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='pd_table' type-id='type-id-298' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2560'>
+ <var-decl name='zi_iotype' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='pd_table_size' type-id='type-id-18' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2592'>
+ <var-decl name='zi_duration' type-id='3ff5601b' visibility='default'/>
</data-member>
- </class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-299'>
- <underlying-type type-id='type-id-41'/>
- <enumerator name='PROP_DEFAULT' value='0'/>
- <enumerator name='PROP_READONLY' value='1'/>
- <enumerator name='PROP_INHERIT' value='2'/>
- <enumerator name='PROP_ONETIME' value='3'/>
- <enumerator name='PROP_ONETIME_DEFAULT' value='4'/>
- </enum-decl>
- <typedef-decl name='zprop_attr_t' type-id='type-id-299' id='type-id-297'/>
- <class-decl name='zfs_index' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-300'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pi_name' type-id='type-id-84' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2624'>
+ <var-decl name='zi_timer' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pi_value' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2688'>
+ <var-decl name='zi_nlanes' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2752'>
+ <var-decl name='zi_cmd' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2784'>
+ <var-decl name='zi_dvas' type-id='8f92235e' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='zprop_index_t' type-id='type-id-300' id='type-id-301'/>
- <qualified-type-def type-id='type-id-301' const='yes' id='type-id-302'/>
- <pointer-type-def type-id='type-id-302' size-in-bits='64' id='type-id-298'/>
- <typedef-decl name='zprop_desc_t' type-id='type-id-296' id='type-id-295'/>
- <pointer-type-def type-id='type-id-295' size-in-bits='64' id='type-id-303'/>
- <function-decl name='zfs_prop_get_table' mangled-name='zfs_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_prop_get_table'>
- <return type-id='type-id-303'/>
- </function-decl>
- <function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zpool_prop.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zpool_prop_align_right' mangled-name='zpool_prop_align_right' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_align_right'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zpool_prop_column_name' mangled-name='zpool_prop_column_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_column_name'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_prop_values' mangled-name='zpool_prop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_values'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_prop_random_value' mangled-name='zpool_prop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_random_value'>
- <parameter type-id='type-id-160' name='prop'/>
- <parameter type-id='type-id-7' name='seed'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zpool_prop_index_to_string' mangled-name='zpool_prop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_index_to_string'>
- <parameter type-id='type-id-160' name='prop'/>
- <parameter type-id='type-id-7' name='index'/>
- <parameter type-id='type-id-294' name='string'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_prop_string_to_index' mangled-name='zpool_prop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_string_to_index'>
- <parameter type-id='type-id-160' name='prop'/>
- <parameter type-id='type-id-84' name='string'/>
- <parameter type-id='type-id-108' name='index'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zpool_prop_unsupported' mangled-name='zpool_prop_unsupported' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_unsupported'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zpool_prop_feature' mangled-name='zpool_prop_feature' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_feature'>
- <parameter type-id='type-id-84' name='name'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zpool_prop_default_numeric' mangled-name='zpool_prop_default_numeric' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_numeric'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zpool_prop_default_string' mangled-name='zpool_prop_default_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_default_string'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_prop_setonce' mangled-name='zpool_prop_setonce' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_setonce'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zpool_prop_readonly' mangled-name='zpool_prop_readonly' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_readonly'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zpool_prop_get_type' mangled-name='zpool_prop_get_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_type'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-293'/>
- </function-decl>
- <function-decl name='zpool_prop_to_name' mangled-name='zpool_prop_to_name' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_to_name'>
- <parameter type-id='type-id-160' name='prop'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_name_to_prop' mangled-name='zpool_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_name_to_prop'>
- <parameter type-id='type-id-84' name='propname'/>
- <return type-id='type-id-160'/>
- </function-decl>
- <function-decl name='zpool_prop_get_table' mangled-name='zpool_prop_get_table' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_prop_get_table'>
- <return type-id='type-id-303'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/zcommon/zprop_common.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs' language='LANG_C99'>
- <function-decl name='zprop_width' mangled-name='zprop_width' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_width'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-85' name='fixed'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-18'/>
- </function-decl>
- <function-decl name='zprop_valid_for_type' mangled-name='zprop_valid_for_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_valid_for_type'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-66' name='type'/>
- <parameter type-id='type-id-9' name='headcheck'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='zprop_values' mangled-name='zprop_values' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_values'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zprop_random_value' mangled-name='zprop_random_value' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_random_value'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-7' name='seed'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='zprop_index_to_string' mangled-name='zprop_index_to_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_index_to_string'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-7' name='index'/>
- <parameter type-id='type-id-294' name='string'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zprop_string_to_index' mangled-name='zprop_string_to_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_string_to_index'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-84' name='string'/>
- <parameter type-id='type-id-108' name='index'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zprop_name_to_prop' mangled-name='zprop_name_to_prop' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_name_to_prop'>
- <parameter type-id='type-id-84' name='propname'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zprop_iter_common' mangled-name='zprop_iter_common' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_iter_common'>
- <parameter type-id='type-id-180' name='func'/>
- <parameter type-id='type-id-13' name='cb'/>
- <parameter type-id='type-id-9' name='show_all'/>
- <parameter type-id='type-id-9' name='ordered'/>
- <parameter type-id='type-id-66' name='type'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='zprop_register_hidden' mangled-name='zprop_register_hidden' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_hidden'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-293' name='type'/>
- <parameter type-id='type-id-297' name='attr'/>
- <parameter type-id='type-id-2' name='objset_types'/>
- <parameter type-id='type-id-84' name='colname'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_index' mangled-name='zprop_register_index' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_index'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-7' name='def'/>
- <parameter type-id='type-id-297' name='attr'/>
- <parameter type-id='type-id-2' name='objset_types'/>
- <parameter type-id='type-id-84' name='values'/>
- <parameter type-id='type-id-84' name='colname'/>
- <parameter type-id='type-id-298' name='idx_tbl'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_number' mangled-name='zprop_register_number' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_number'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-7' name='def'/>
- <parameter type-id='type-id-297' name='attr'/>
- <parameter type-id='type-id-2' name='objset_types'/>
- <parameter type-id='type-id-84' name='values'/>
- <parameter type-id='type-id-84' name='colname'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_string' mangled-name='zprop_register_string' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_string'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-84' name='def'/>
- <parameter type-id='type-id-297' name='attr'/>
- <parameter type-id='type-id-2' name='objset_types'/>
- <parameter type-id='type-id-84' name='values'/>
- <parameter type-id='type-id-84' name='colname'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zprop_register_impl' mangled-name='zprop_register_impl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zprop_register_impl'>
- <parameter type-id='type-id-2' name='prop'/>
- <parameter type-id='type-id-84' name='name'/>
- <parameter type-id='type-id-293' name='type'/>
- <parameter type-id='type-id-7' name='numdefault'/>
- <parameter type-id='type-id-84' name='strdefault'/>
- <parameter type-id='type-id-297' name='attr'/>
- <parameter type-id='type-id-2' name='objset_types'/>
- <parameter type-id='type-id-84' name='values'/>
- <parameter type-id='type-id-84' name='colname'/>
- <parameter type-id='type-id-9' name='rightalign'/>
- <parameter type-id='type-id-9' name='visible'/>
- <parameter type-id='type-id-298' name='idx_tbl'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__ctype_tolower_loc' mangled-name='__ctype_tolower_loc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_mod_supported' mangled-name='zfs_mod_supported' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='libshare.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
- <function-decl name='sa_validate_shareopts' mangled-name='sa_validate_shareopts' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_validate_shareopts'>
- <parameter type-id='type-id-14' name='options'/>
- <parameter type-id='type-id-14' name='proto'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='sa_errorstr' mangled-name='sa_errorstr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_errorstr'>
- <parameter type-id='type-id-2' name='err'/>
- <return type-id='type-id-14'/>
- </function-decl>
- <function-decl name='sa_commit_shares' mangled-name='sa_commit_shares' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_commit_shares'>
- <parameter type-id='type-id-84' name='protocol'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='sa_is_shared' mangled-name='sa_is_shared' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_is_shared'>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-14' name='protocol'/>
- <return type-id='type-id-9'/>
- </function-decl>
- <function-decl name='sa_disable_share' mangled-name='sa_disable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_disable_share'>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-14' name='protocol'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='sa_enable_share' mangled-name='sa_enable_share' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='sa_enable_share'>
- <parameter type-id='type-id-84' name='zfsname'/>
- <parameter type-id='type-id-84' name='mountpoint'/>
- <parameter type-id='type-id-84' name='shareopts'/>
- <parameter type-id='type-id-14' name='protocol'/>
- <return type-id='type-id-2'/>
- </function-decl>
- <function-decl name='libshare_nfs_init' mangled-name='libshare_nfs_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='libshare_smb_init' mangled-name='libshare_smb_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='nfs.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
- <function-decl name='mkostemp' mangled-name='mkostemp64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='mkdir' mangled-name='mkdir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='flock' mangled-name='flock' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='rename' mangled-name='rename' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='unlink' mangled-name='unlink' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nfs_copy_entries' mangled-name='nfs_copy_entries' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/nfs.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
- <function-decl name='register_fstype' mangled-name='register_fstype' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nfs_toggle_share' mangled-name='nfs_toggle_share' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='fputs' mangled-name='fputs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin_stpcpy' mangled-name='stpcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/smb.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libshare' language='LANG_C99'>
- <class-decl name='smb_share_s' size-in-bits='36992' is-struct='yes' visibility='default' id='type-id-304'>
+ <typedef-decl name='zfs_stat_t' type-id='6417f0b9' id='0371a9c7'/>
+ <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='6417f0b9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='name' type-id='type-id-305' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2040'>
- <var-decl name='path' type-id='type-id-193' visibility='default'/>
+ <var-decl name='zs_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='34808'>
- <var-decl name='comment' type-id='type-id-305' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='zs_mode' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='36864'>
- <var-decl name='guest_ok' type-id='type-id-9' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zs_links' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='36928'>
- <var-decl name='next' type-id='type-id-306' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='zs_ctime' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-23' size-in-bits='2040' id='type-id-305'>
- <subrange length='255' type-id='type-id-24' id='type-id-307'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-304' size-in-bits='64' id='type-id-306'/>
- <typedef-decl name='smb_share_t' type-id='type-id-304' id='type-id-308'/>
- <pointer-type-def type-id='type-id-308' size-in-bits='64' id='type-id-309'/>
- <var-decl name='smb_shares' type-id='type-id-309' visibility='default'/>
- <function-decl name='__fgets_alias' mangled-name='fgets' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='opendir' mangled-name='opendir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-1'/>
+ <pointer-type-def type-id='a5559cdd' size-in-bits='64' id='e4ec4540'/>
+ <function-decl name='zfs_version_kernel' mangled-name='zfs_version_kernel' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_version_kernel'>
+ <parameter type-id='26a90f95' name='version'/>
+ <parameter type-id='95e97e5e' name='len'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='libzfs_error_init' mangled-name='libzfs_error_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_error_init'>
+ <parameter type-id='95e97e5e' name='error'/>
+ <return type-id='80f4b756'/>
+ </function-decl>
+ <function-decl name='zfs_ioctl' mangled-name='zfs_ioctl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl'>
+ <parameter type-id='b0382bb3' name='hdl'/>
+ <parameter type-id='95e97e5e' name='request'/>
+ <parameter type-id='e4ec4540' name='zc'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c b/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
index 0e3198d9c856..20251e9e7a5a 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_dataset.c
@@ -1,5568 +1,5567 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2019 Joyent, Inc.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012 DEY Storage Systems, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017-2018 RackTop Systems.
* Copyright (c) 2019 Datto Inc.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021 Matt Fiddaman
*/
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <zone.h>
#include <fcntl.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <pwd.h>
#include <grp.h>
#include <ucred.h>
#ifdef HAVE_IDMAP
#include <idmap.h>
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_deleg.h"
static int userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp);
/*
* Given a single type (not a mask of types), return the type in a human
* readable form.
*/
const char *
zfs_type_to_name(zfs_type_t type)
{
switch (type) {
case ZFS_TYPE_FILESYSTEM:
return (dgettext(TEXT_DOMAIN, "filesystem"));
case ZFS_TYPE_SNAPSHOT:
return (dgettext(TEXT_DOMAIN, "snapshot"));
case ZFS_TYPE_VOLUME:
return (dgettext(TEXT_DOMAIN, "volume"));
case ZFS_TYPE_POOL:
return (dgettext(TEXT_DOMAIN, "pool"));
case ZFS_TYPE_BOOKMARK:
return (dgettext(TEXT_DOMAIN, "bookmark"));
default:
assert(!"unhandled zfs_type_t");
}
return (NULL);
}
/*
* Validate a ZFS path. This is used even before trying to open the dataset, to
* provide a more meaningful error message. We call zfs_error_aux() to
* explain exactly why the name was not valid.
*/
int
zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
boolean_t modifying)
{
namecheck_err_t why;
char what;
if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshot delimiter '@' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '@' delimiter in snapshot name"));
return (0);
}
if (!(type & ZFS_TYPE_BOOKMARK) && strchr(path, '#') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bookmark delimiter '#' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_BOOKMARK && strchr(path, '#') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '#' delimiter in bookmark name"));
return (0);
}
if (modifying && strchr(path, '%') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid character %c in name"), '%');
return (0);
}
if (entity_namecheck(path, &why, &what) != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is too long"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component or misplaced '@'"
" or '#' delimiter in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in name"), what);
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool doesn't begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"reserved disk name"));
break;
case NAME_ERR_SELF_REF:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"self reference, '.' is found in name"));
break;
case NAME_ERR_PARENT_REF:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent reference, '..' is found in name"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (0);
}
return (-1);
}
int
zfs_name_valid(const char *name, zfs_type_t type)
{
if (type == ZFS_TYPE_POOL)
return (zpool_name_valid(NULL, B_FALSE, name));
return (zfs_validate_name(NULL, name, type, B_FALSE));
}
/*
* This function takes the raw DSL properties, and filters out the user-defined
* properties into a separate nvlist.
*/
static nvlist_t *
process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvpair_t *elem;
nvlist_t *propval;
nvlist_t *nvl;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
if (!zfs_prop_user(nvpair_name(elem)))
continue;
verify(nvpair_value_nvlist(elem, &propval) == 0);
if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
nvlist_free(nvl);
(void) no_memory(hdl);
return (NULL);
}
}
return (nvl);
}
static zpool_handle_t *
zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph;
if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
if (hdl->libzfs_pool_handles != NULL)
zph->zpool_next = hdl->libzfs_pool_handles;
hdl->libzfs_pool_handles = zph;
}
return (zph);
}
static zpool_handle_t *
zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph = hdl->libzfs_pool_handles;
while ((zph != NULL) &&
(strncmp(pool_name, zpool_get_name(zph), len) != 0))
zph = zph->zpool_next;
return (zph);
}
/*
* Returns a handle to the pool that contains the provided dataset.
* If a handle to that pool already exists then that handle is returned.
* Otherwise, a new handle is created and added to the list of handles.
*/
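/*
 * The pool name is the portion of the dataset name up to the first
 * '/', '@', or '#'; for example, "tank/home@snap" maps to the pool
 * "tank".
 */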
static zpool_handle_t *
zpool_handle(zfs_handle_t *zhp)
{
char *pool_name;
int len;
zpool_handle_t *zph;
len = strcspn(zhp->zfs_name, "/@#") + 1;
pool_name = zfs_alloc(zhp->zfs_hdl, len);
(void) strlcpy(pool_name, zhp->zfs_name, len);
zph = zpool_find_handle(zhp, pool_name, len);
if (zph == NULL)
zph = zpool_add_handle(zhp, pool_name);
free(pool_name);
return (zph);
}
void
zpool_free_handles(libzfs_handle_t *hdl)
{
zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;
while (zph != NULL) {
next = zph->zpool_next;
zpool_close(zph);
zph = next;
}
hdl->libzfs_pool_handles = NULL;
}
/*
* Utility function to gather stats (objset and zpl) for the given object.
*/
static int
get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));
while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, zc) != 0) {
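/*
 * ENOMEM from the ioctl means the preallocated destination nvlist
 * buffer was too small; zcmd_expand_dst_nvlist() grows it and the
 * ioctl is retried.
 */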
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, zc) != 0) {
return (-1);
}
} else {
return (-1);
}
}
return (0);
}
/*
* Utility function to get the received properties of the given object.
*/
static int
get_recvd_props_ioctl(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *recvdprops;
zfs_cmd_t zc = {"\0"};
int err;
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
return (-1);
}
} else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops);
zcmd_free_nvlists(&zc);
if (err != 0)
return (-1);
nvlist_free(zhp->zfs_recvd_props);
zhp->zfs_recvd_props = recvdprops;
return (0);
}
static int
put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
nvlist_t *allprops, *userprops;
zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) {
return (-1);
}
/*
* XXX Why do we store the user props separately, in addition to
* storing them in zfs_props?
*/
if ((userprops = process_user_props(zhp, allprops)) == NULL) {
nvlist_free(allprops);
return (-1);
}
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
zhp->zfs_props = allprops;
zhp->zfs_user_props = userprops;
return (0);
}
static int
get_stats(zfs_handle_t *zhp)
{
int rc = 0;
zfs_cmd_t zc = {"\0"};
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
if (get_stats_ioctl(zhp, &zc) != 0)
rc = -1;
else if (put_stats_zhdl(zhp, &zc) != 0)
rc = -1;
zcmd_free_nvlists(&zc);
return (rc);
}
/*
* Refresh the properties currently stored in the handle.
*/
void
zfs_refresh_properties(zfs_handle_t *zhp)
{
(void) get_stats(zhp);
}
/*
* Makes a handle from the given dataset name. Used by zfs_open() and
* zfs_iter_* to create child handles on the fly.
*/
static int
make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
if (put_stats_zhdl(zhp, zc) != 0)
return (-1);
/*
* We've managed to open the dataset and gather statistics. Determine
* the high-level type.
*/
if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_head_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_OTHER)
return (-1);
else
abort();
if (zhp->zfs_dmustats.dds_is_snapshot)
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
else
abort(); /* we should never see any other types */
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL)
return (-1);
return (0);
}
zfs_handle_t *
make_dataset_handle(libzfs_handle_t *hdl, const char *path)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) {
free(zhp);
return (NULL);
}
if (get_stats_ioctl(zhp, &zc) == -1) {
zcmd_free_nvlists(&zc);
free(zhp);
return (NULL);
}
if (make_dataset_handle_common(zhp, &zc) == -1) {
free(zhp);
zhp = NULL;
}
zcmd_free_nvlists(&zc);
return (zhp);
}
zfs_handle_t *
make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
if (make_dataset_handle_common(zhp, zc) == -1) {
free(zhp);
return (NULL);
}
return (zhp);
}
zfs_handle_t *
make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = pzhp->zfs_hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
zhp->zfs_head_type = pzhp->zfs_type;
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
zhp->zpool_hdl = zpool_handle(zhp);
return (zhp);
}
zfs_handle_t *
zfs_handle_dup(zfs_handle_t *zhp_orig)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = zhp_orig->zfs_hdl;
zhp->zpool_hdl = zhp_orig->zpool_hdl;
(void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name,
sizeof (zhp->zfs_name));
zhp->zfs_type = zhp_orig->zfs_type;
zhp->zfs_head_type = zhp_orig->zfs_head_type;
zhp->zfs_dmustats = zhp_orig->zfs_dmustats;
if (zhp_orig->zfs_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_user_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_user_props,
&zhp->zfs_user_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_recvd_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_recvd_props,
&zhp->zfs_recvd_props, 0)) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
if (zhp_orig->zfs_mntopts != NULL) {
zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
zhp_orig->zfs_mntopts);
}
zhp->zfs_props_table = zhp_orig->zfs_props_table;
return (zhp);
}
boolean_t
zfs_bookmark_exists(const char *path)
{
nvlist_t *bmarks;
nvlist_t *props;
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *bmark_name;
char *pound;
int err;
boolean_t rv;
(void) strlcpy(fsname, path, sizeof (fsname));
pound = strchr(fsname, '#');
if (pound == NULL)
return (B_FALSE);
*pound = '\0';
bmark_name = pound + 1;
props = fnvlist_alloc();
err = lzc_get_bookmarks(fsname, props, &bmarks);
nvlist_free(props);
if (err != 0) {
nvlist_free(bmarks);
return (B_FALSE);
}
rv = nvlist_exists(bmarks, bmark_name);
nvlist_free(bmarks);
return (rv);
}
zfs_handle_t *
make_bookmark_handle(zfs_handle_t *parent, const char *path,
nvlist_t *bmark_props)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
/* Fill in the name. */
zhp->zfs_hdl = parent->zfs_hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
/* Set the property lists. */
if (nvlist_dup(bmark_props, &zhp->zfs_props, 0) != 0) {
free(zhp);
return (NULL);
}
/* Set the types. */
zhp->zfs_head_type = parent->zfs_head_type;
zhp->zfs_type = ZFS_TYPE_BOOKMARK;
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) {
nvlist_free(zhp->zfs_props);
free(zhp);
return (NULL);
}
return (zhp);
}
struct zfs_open_bookmarks_cb_data {
const char *path;
zfs_handle_t *zhp;
};
static int
zfs_open_bookmarks_cb(zfs_handle_t *zhp, void *data)
{
struct zfs_open_bookmarks_cb_data *dp = data;
/*
* Is it the one we are looking for?
*/
if (strcmp(dp->path, zfs_get_name(zhp)) == 0) {
/*
* We found it. Save it and let the caller know we are done.
*/
dp->zhp = zhp;
return (EEXIST);
}
/*
* Not found. Close the handle and ask for another one.
*/
zfs_close(zhp);
return (0);
}
/*
* Opens the given snapshot, bookmark, filesystem, or volume. The 'types'
* argument is a mask of acceptable types. The function will print an
* appropriate error message and return NULL if it can't be opened.
*/
zfs_handle_t *
zfs_open(libzfs_handle_t *hdl, const char *path, int types)
{
zfs_handle_t *zhp;
char errbuf[1024];
char *bookp;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
/*
* Validate the name before we even try to open it.
*/
if (!zfs_validate_name(hdl, path, types, B_FALSE)) {
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
return (NULL);
}
/*
* Bookmarks need to be handled separately.
*/
bookp = strchr(path, '#');
if (bookp == NULL) {
/*
* Try to get stats for the dataset, which will tell us if it
* exists.
*/
errno = 0;
if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
} else {
char dsname[ZFS_MAX_DATASET_NAME_LEN];
zfs_handle_t *pzhp;
struct zfs_open_bookmarks_cb_data cb_data = {path, NULL};
/*
* We need to cut out '#' and everything after '#'
* to get the parent dataset name only.
*/
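/*
 * For example, opening "tank/fs#mark" first creates a handle for the
 * parent dataset "tank/fs" and then iterates its bookmarks looking
 * for the full name "tank/fs#mark".
 */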
assert(bookp - path < sizeof (dsname));
(void) strncpy(dsname, path, bookp - path);
dsname[bookp - path] = '\0';
/*
* Create handle for the parent dataset.
*/
errno = 0;
if ((pzhp = make_dataset_handle(hdl, dsname)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
/*
* Iterate bookmarks to find the right one.
*/
errno = 0;
if ((zfs_iter_bookmarks(pzhp, zfs_open_bookmarks_cb,
&cb_data) == 0) && (cb_data.zhp == NULL)) {
(void) zfs_error(hdl, EZFS_NOENT, errbuf);
zfs_close(pzhp);
return (NULL);
}
if (cb_data.zhp == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
zfs_close(pzhp);
return (NULL);
}
zhp = cb_data.zhp;
/*
* Cleanup.
*/
zfs_close(pzhp);
}
if (!(types & zhp->zfs_type)) {
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Release a ZFS handle. Nothing to do but free the associated memory.
*/
void
zfs_close(zfs_handle_t *zhp)
{
if (zhp->zfs_mntopts)
free(zhp->zfs_mntopts);
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
nvlist_free(zhp->zfs_recvd_props);
free(zhp);
}
typedef struct mnttab_node {
struct mnttab mtn_mt;
avl_node_t mtn_node;
} mnttab_node_t;
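/*
 * The libzfs_mnttab_* functions cache the ZFS entries of MNTTAB in an
 * AVL tree keyed on mnt_special (the dataset name), so repeated mount
 * lookups do not have to rescan the mount table each time.
 */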
static int
libzfs_mnttab_cache_compare(const void *arg1, const void *arg2)
{
const mnttab_node_t *mtn1 = (const mnttab_node_t *)arg1;
const mnttab_node_t *mtn2 = (const mnttab_node_t *)arg2;
int rv;
rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special);
return (TREE_ISIGN(rv));
}
void
libzfs_mnttab_init(libzfs_handle_t *hdl)
{
pthread_mutex_init(&hdl->libzfs_mnttab_cache_lock, NULL);
assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0);
avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare,
sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
}
static int
libzfs_mnttab_update(libzfs_handle_t *hdl)
{
FILE *mnttab;
struct mnttab entry;
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
while (getmntent(mnttab, &entry) == 0) {
mnttab_node_t *mtn;
avl_index_t where;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts);
/* Exclude duplicate mounts */
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, &where) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
continue;
}
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
(void) fclose(mnttab);
return (0);
}
void
libzfs_mnttab_fini(libzfs_handle_t *hdl)
{
void *cookie = NULL;
mnttab_node_t *mtn;
while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie))
!= NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
}
avl_destroy(&hdl->libzfs_mnttab_cache);
(void) pthread_mutex_destroy(&hdl->libzfs_mnttab_cache_lock);
}
void
libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable)
{
hdl->libzfs_mnttab_enable = enable;
}
int
libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname,
struct mnttab *entry)
{
FILE *mnttab;
mnttab_node_t find;
mnttab_node_t *mtn;
int ret = ENOENT;
if (!hdl->libzfs_mnttab_enable) {
struct mnttab srch = { 0 };
if (avl_numnodes(&hdl->libzfs_mnttab_cache))
libzfs_mnttab_fini(hdl);
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
srch.mnt_special = (char *)fsname;
srch.mnt_fstype = MNTTYPE_ZFS;
ret = getmntany(mnttab, entry, &srch) ? ENOENT : 0;
(void) fclose(mnttab);
return (ret);
}
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0) {
int error;
if ((error = libzfs_mnttab_update(hdl)) != 0) {
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
return (error);
}
}
find.mtn_mt.mnt_special = (char *)fsname;
mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL);
if (mtn) {
*entry = mtn->mtn_mt;
ret = 0;
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
return (ret);
}
void
libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special,
const char *mountp, const char *mntopts)
{
mnttab_node_t *mtn;
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
if (avl_numnodes(&hdl->libzfs_mnttab_cache) != 0) {
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts);
/*
* Another thread may have already added this entry
* via libzfs_mnttab_update. If so we should skip it.
*/
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, NULL) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
} else {
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}
void
libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
{
mnttab_node_t find;
mnttab_node_t *ret;
pthread_mutex_lock(&hdl->libzfs_mnttab_cache_lock);
find.mtn_mt.mnt_special = (char *)fsname;
if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL))
!= NULL) {
avl_remove(&hdl->libzfs_mnttab_cache, ret);
free(ret->mtn_mt.mnt_special);
free(ret->mtn_mt.mnt_mountp);
free(ret->mtn_mt.mnt_fstype);
free(ret->mtn_mt.mnt_mntopts);
free(ret);
}
pthread_mutex_unlock(&hdl->libzfs_mnttab_cache_lock);
}
int
zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
{
zpool_handle_t *zpool_handle = zhp->zpool_hdl;
if (zpool_handle == NULL)
return (-1);
*spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
return (0);
}
/*
* The choice of reservation property depends on the SPA version.
*/
static int
zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
{
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
if (spa_version >= SPA_VERSION_REFRESERVATION)
*resv_prop = ZFS_PROP_REFRESERVATION;
else
*resv_prop = ZFS_PROP_RESERVATION;
return (0);
}
/*
* Given an nvlist of properties to set, validates that they are correct, and
* parses any numeric properties (index, boolean, etc) if they are specified as
* strings.
*/
nvlist_t *
zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
uint64_t zoned, zfs_handle_t *zhp, zpool_handle_t *zpool_hdl,
boolean_t key_params_ok, const char *errbuf)
{
nvpair_t *elem;
uint64_t intval;
char *strval;
zfs_prop_t prop;
nvlist_t *ret;
int chosen_normal = -1;
int chosen_utf = -1;
if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
/*
* Make sure this property is valid and applies to this type.
*/
elem = NULL;
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zfs_name_to_prop(propname);
if (prop == ZPROP_INVAL && zfs_prop_user(propname)) {
/*
* This is a user property: make sure it's a
* string, and that it's less than ZAP_MAXNAMELEN.
*/
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property name '%s' is too long"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (nvlist_add_string(ret, propname, strval) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Currently, only user properties can be modified on
* snapshots.
*/
if (type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"this property can not be modified for snapshots"));
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (prop == ZPROP_INVAL && zfs_prop_userquota(propname)) {
zfs_userquota_prop_t uqtype;
char *newpropname = NULL;
char domain[128];
uint64_t rid;
uint64_t valary[3];
int rc;
if (userquota_propname_decode(propname, zoned,
&uqtype, domain, sizeof (domain), &rid) != 0) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' has an invalid user/group name"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (uqtype != ZFS_PROP_USERQUOTA &&
uqtype != ZFS_PROP_GROUPQUOTA &&
uqtype != ZFS_PROP_USEROBJQUOTA &&
uqtype != ZFS_PROP_GROUPOBJQUOTA &&
uqtype != ZFS_PROP_PROJECTQUOTA &&
uqtype != ZFS_PROP_PROJECTOBJQUOTA) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, "none") == 0) {
intval = 0;
} else if (zfs_nicestrtonum(hdl,
strval, &intval) != 0) {
(void) zfs_error(hdl,
EZFS_BADPROP, errbuf);
goto error;
}
} else if (nvpair_type(elem) ==
DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, &intval);
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable "
"{user|group|project}quota"));
goto error;
}
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* Encode the prop name as
* userquota@<hex-rid>-domain, to make it easy
* for the kernel to decode.
*/
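/*
 * For example, a userquota entry with rid 1000 (0x3e8) in domain
 * "mydomain" is encoded as "userquota@3e8-mydomain".
 */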
rc = asprintf(&newpropname, "%s%llx-%s",
zfs_userquota_prop_prefixes[uqtype],
(longlong_t)rid, domain);
if (rc == -1 || newpropname == NULL) {
(void) no_memory(hdl);
goto error;
}
valary[0] = uqtype;
valary[1] = rid;
valary[2] = intval;
if (nvlist_add_uint64_array(ret, newpropname,
valary, 3) != 0) {
free(newpropname);
(void) no_memory(hdl);
goto error;
}
free(newpropname);
continue;
} else if (prop == ZPROP_INVAL && zfs_prop_written(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (prop == ZPROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!zfs_prop_valid_for_type(prop, type, B_FALSE)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' does not "
"apply to datasets of this type"), propname);
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (zfs_prop_readonly(prop) &&
!(zfs_prop_setonce(prop) && zhp == NULL) &&
!(zfs_prop_encryption_key_param(prop) && key_params_ok)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, type, ret,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform some additional checks for specific properties.
*/
switch (prop) {
case ZFS_PROP_VERSION:
{
int version;
if (zhp == NULL)
break;
version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (intval < version) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can not downgrade; already at version %u"),
version);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_VOLBLOCKSIZE:
case ZFS_PROP_RECORDSIZE:
{
int maxbs = SPA_MAXBLOCKSIZE;
char buf[64];
if (zpool_hdl != NULL) {
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
}
/*
* The value must be a power of two between
* SPA_MINBLOCKSIZE and maxbs.
*/
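/*
 * SPA_MINBLOCKSIZE is 512 bytes, so valid values are 512, 1024,
 * 2048, and so on, up to maxbs.
 */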
if (intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval)) {
zfs_nicebytes(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be power of 2 from 512B "
"to %s"), propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_SPECIAL_SMALL_BLOCKS:
{
int maxbs = SPA_OLD_MAXBLOCKSIZE;
char buf[64];
if (zpool_hdl != NULL) {
char state[64] = "";
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
/*
* Issue a warning but do not fail so that
* tests for settable properties succeed.
*/
if (zpool_prop_get_feature(zpool_hdl,
"feature@allocation_classes", state,
sizeof (state)) != 0 ||
strcmp(state, ZFS_FEATURE_ACTIVE) != 0) {
(void) fprintf(stderr, gettext(
"%s: property requires a special "
"device in the pool\n"), propname);
}
}
if (intval != 0 &&
(intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval))) {
zfs_nicebytes(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid '%s=%llu' property: must be zero "
"or a power of 2 from 512B to %s"),
propname, (unsigned long long)intval, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
/*
* Verify the mlslabel string and convert to
* internal hex label string.
*/
m_label_t *new_sl;
char *hex = NULL; /* internal label string */
/* Default value is already OK. */
if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
break;
/* Verify the label can be converted to binary form */
if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
(str_to_label(strval, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1)) {
goto badlabel;
}
/* Now translate to hex internal label string */
if (label_to_str(new_sl, &hex, M_INTERNAL,
DEF_NAMES) != 0) {
if (hex)
free(hex);
goto badlabel;
}
m_label_free(new_sl);
/* If string is already in internal form, we're done. */
if (strcmp(strval, hex) == 0) {
free(hex);
break;
}
/* Replace the label string with the internal form. */
(void) nvlist_remove(ret, zfs_prop_to_name(prop),
DATA_TYPE_STRING);
verify(nvlist_add_string(ret, zfs_prop_to_name(prop),
hex) == 0);
free(hex);
break;
badlabel:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid mlslabel '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
m_label_free(new_sl); /* OK if null */
goto error;
#else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mlslabels are unsupported"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
#endif /* HAVE_MLSLABEL */
}
case ZFS_PROP_MOUNTPOINT:
{
namecheck_err_t why;
if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
break;
if (mountpoint_namecheck(strval, &why)) {
switch (why) {
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' must be an absolute path, "
"'none', or 'legacy'"), propname);
break;
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"component of '%s' is too long"),
propname);
break;
default:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"(%d) not defined"),
why);
break;
}
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
+ fallthrough;
}
- /* FALLTHROUGH */
-
case ZFS_PROP_SHARESMB:
case ZFS_PROP_SHARENFS:
/*
* For the mountpoint and sharenfs or sharesmb
* properties, check if it can be set in a
* global/non-global zone based on
* the zoned property value:
*
* global zone non-global zone
* --------------------------------------------------
* zoned=on mountpoint (no) mountpoint (yes)
* sharenfs (no) sharenfs (no)
* sharesmb (no) sharesmb (no)
*
* zoned=off mountpoint (yes) N/A
* sharenfs (yes)
* sharesmb (yes)
*/
if (zoned) {
if (getzoneid() == GLOBAL_ZONEID) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set on "
"dataset in a non-global zone"),
propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
} else if (prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set in "
"a non-global zone"), propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
}
} else if (getzoneid() != GLOBAL_ZONEID) {
/*
* If zoned property is 'off', this must be in
* a global zone. If not, something is wrong.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set while dataset "
"'zoned' property is set"), propname);
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
/*
* At this point, it is legitimate to set the
* property. Now we want to make sure that the
* property value is valid if it is sharenfs.
*/
if ((prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) &&
strcmp(strval, "on") != 0 &&
strcmp(strval, "off") != 0) {
zfs_share_proto_t proto;
if (prop == ZFS_PROP_SHARESMB)
proto = PROTO_SMB;
else
proto = PROTO_NFS;
if (zfs_parse_options(strval, proto) != SA_OK) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set to invalid "
"options"), propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
break;
case ZFS_PROP_KEYLOCATION:
if (!zfs_prop_valid_keylocation(strval, B_FALSE)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid keylocation"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zhp != NULL) {
uint64_t crypt =
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
if (crypt == ZIO_CRYPT_OFF &&
strcmp(strval, "none") != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must be 'none' "
"for unencrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
} else if (crypt != ZIO_CRYPT_OFF &&
strcmp(strval, "none") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must not be 'none' "
"for encrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
break;
case ZFS_PROP_PBKDF2_ITERS:
if (intval < MIN_PBKDF2_ITERATIONS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"minimum pbkdf2 iterations is %u"),
MIN_PBKDF2_ITERATIONS);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZFS_PROP_UTF8ONLY:
chosen_utf = (int)intval;
break;
case ZFS_PROP_NORMALIZE:
chosen_normal = (int)intval;
break;
default:
break;
}
/*
* For changes to existing volumes, we have some additional
* checks to enforce.
*/
if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
uint64_t blocksize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLBLOCKSIZE);
char buf[64];
switch (prop) {
case ZFS_PROP_VOLSIZE:
if (intval % blocksize != 0) {
zfs_nicebytes(blocksize, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a multiple of "
"volume block size (%s)"),
propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be zero"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
/* check encryption properties */
if (zhp != NULL) {
int64_t crypt = zfs_prop_get_int(zhp,
ZFS_PROP_ENCRYPTION);
switch (prop) {
case ZFS_PROP_COPIES:
if (crypt != ZIO_CRYPT_OFF && intval > 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encrypted datasets cannot have "
"3 copies"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
}
/*
* If normalization was chosen, but no UTF8 choice was made,
* enforce rejection of non-UTF8 names.
*
* If normalization was chosen, but rejecting non-UTF8 names
* was explicitly not chosen, it is an error.
*/
if (chosen_normal > 0 && chosen_utf < 0) {
if (nvlist_add_uint64(ret,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
(void) no_memory(hdl);
goto error;
}
} else if (chosen_normal > 0 && chosen_utf == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be set 'on' if normalization chosen"),
zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
return (ret);
error:
nvlist_free(ret);
return (NULL);
}
static int
zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t old_volsize;
uint64_t new_volsize;
uint64_t old_reservation;
uint64_t new_reservation;
zfs_prop_t resv_prop;
nvlist_t *props;
zpool_handle_t *zph = zpool_handle(zhp);
/*
* If this is an existing volume, and someone is setting the volsize,
* make sure that it matches the reservation, or add it if necessary.
*/
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_reservation = zfs_prop_get_int(zhp, resv_prop);
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
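/*
 * If the current reservation was not derived from the current volsize
 * (i.e. it was set by hand), or the caller is setting the reservation
 * explicitly, leave the reservation alone.
 */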
if ((zvol_volsize_to_reservation(zph, old_volsize, props) !=
old_reservation) || nvlist_exists(nvl,
zfs_prop_to_name(resv_prop))) {
fnvlist_free(props);
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&new_volsize) != 0) {
fnvlist_free(props);
return (-1);
}
new_reservation = zvol_volsize_to_reservation(zph, new_volsize, props);
fnvlist_free(props);
if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop),
new_reservation) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
/*
* Helper for 'zfs {set|clone} refreservation=auto'. Must be called after
* zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinel value.
* Return codes must match zfs_add_synthetic_resv().
*/
static int
zfs_fix_auto_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t volsize;
uint64_t resvsize;
zfs_prop_t prop;
nvlist_t *props;
if (!ZFS_IS_VOLUME(zhp)) {
return (0);
}
if (zfs_which_resv_prop(zhp, &prop) != 0) {
return (-1);
}
if (prop != ZFS_PROP_REFRESERVATION) {
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(prop), &resvsize) != 0) {
/* No value being set, so it can't be "auto" */
return (0);
}
if (resvsize != UINT64_MAX) {
/* Being set to a value other than "auto" */
return (0);
}
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
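/* Prefer the volsize being set in this request; otherwise use the current one. */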
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&volsize) != 0) {
volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
}
resvsize = zvol_volsize_to_reservation(zpool_handle(zhp), volsize,
props);
fnvlist_free(props);
(void) nvlist_remove_all(nvl, zfs_prop_to_name(prop));
if (nvlist_add_uint64(nvl, zfs_prop_to_name(prop), resvsize) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
static boolean_t
zfs_is_namespace_prop(zfs_prop_t prop)
{
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_SETUID:
case ZFS_PROP_READONLY:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
return (B_TRUE);
default:
return (B_FALSE);
}
}
/*
* Given a property name and value, set the property for the given dataset.
*/
int
zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
{
int ret = -1;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl = NULL;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_string(nvl, propname, propval) != 0) {
(void) no_memory(hdl);
goto error;
}
ret = zfs_prop_set_list(zhp, nvl);
error:
nvlist_free(nvl);
return (ret);
}
/*
* Given an nvlist of property names and values, set the properties for the
* given dataset.
*/
int
zfs_prop_set_list(zfs_handle_t *zhp, nvlist_t *props)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
prop_changelist_t **cls = NULL;
int cl_idx;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl;
int nvl_len = 0;
int added_resv = 0;
zfs_prop_t prop = 0;
nvpair_t *elem;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if ((nvl = zfs_valid_proplist(hdl, zhp->zfs_type, props,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, zhp->zpool_hdl,
B_FALSE, errbuf)) == NULL)
goto error;
/*
* We have to check for any extra properties which need to be added
* before computing the length of the nvlist.
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
if (zfs_name_to_prop(nvpair_name(elem)) == ZFS_PROP_VOLSIZE &&
(added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) {
goto error;
}
}
if (added_resv != 1 &&
(added_resv = zfs_fix_auto_resv(zhp, nvl)) == -1) {
goto error;
}
/*
* Check how many properties we're setting and allocate an array to
* store changelist pointers for postfix().
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem))
nvl_len++;
if ((cls = calloc(nvl_len, sizeof (prop_changelist_t *))) == NULL)
goto error;
cl_idx = 0;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
assert(cl_idx < nvl_len);
/*
* We don't want to unmount & remount the dataset when changing
* its canmount property to 'on' or 'noauto'. We only use
* the changelist logic to unmount when setting canmount=off.
*/
if (prop != ZFS_PROP_CANMOUNT ||
(fnvpair_value_uint64(elem) == ZFS_CANMOUNT_OFF &&
zfs_is_mounted(zhp, NULL))) {
cls[cl_idx] = changelist_gather(zhp, prop, 0, 0);
if (cls[cl_idx] == NULL)
goto error;
}
if (prop == ZFS_PROP_MOUNTPOINT &&
changelist_haszonedchild(cls[cl_idx])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if (cls[cl_idx] != NULL &&
(ret = changelist_prefix(cls[cl_idx])) != 0)
goto error;
cl_idx++;
}
assert(cl_idx == nvl_len);
/*
* Execute the corresponding ioctl() to set this list of properties.
*/
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if ((ret = zcmd_write_src_nvlist(hdl, &zc, nvl)) != 0 ||
(ret = zcmd_alloc_dst_nvlist(hdl, &zc, 0)) != 0)
goto error;
ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
if (ret != 0) {
if (zc.zc_nvlist_dst_filled == B_FALSE) {
(void) zfs_standard_error(hdl, errno, errbuf);
goto error;
}
/* Get the list of unset properties back and report them. */
nvlist_t *errorprops = NULL;
if (zcmd_read_dst_nvlist(hdl, &zc, &errorprops) != 0)
goto error;
for (nvpair_t *elem = nvlist_next_nvpair(errorprops, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errorprops, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
zfs_setprop_error(hdl, prop, errno, errbuf);
}
nvlist_free(errorprops);
if (added_resv && errno == ENOSPC) {
/* clean up the volsize property we tried to set */
uint64_t old_volsize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLSIZE);
nvlist_free(nvl);
nvl = NULL;
zcmd_free_nvlists(&zc);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
goto error;
if (nvlist_add_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_VOLSIZE),
old_volsize) != 0)
goto error;
if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0)
goto error;
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
}
} else {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL) {
int clp_err = changelist_postfix(cls[cl_idx]);
if (clp_err != 0)
ret = clp_err;
}
}
if (ret == 0) {
/*
* Refresh the statistics so the new property
* value is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
}
error:
nvlist_free(nvl);
zcmd_free_nvlists(&zc);
if (cls != NULL) {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL)
changelist_free(cls[cl_idx]);
}
free(cls);
}
return (ret);
}
/*
* Given a property, inherit the value from the parent dataset, or if received
* is TRUE, revert to the received value, if any.
*/
int
zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received)
{
zfs_cmd_t zc = {"\0"};
int ret;
prop_changelist_t *cl;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
zfs_prop_t prop;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot inherit %s for '%s'"), propname, zhp->zfs_name);
zc.zc_cookie = received;
if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) {
/*
* For user properties, the amount of work we have to do is very
* small, so just do it here.
*/
if (!zfs_prop_user(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
return (zfs_standard_error(hdl, errno, errbuf));
(void) get_stats(zhp);
return (0);
}
/*
* Verify that this property is inheritable.
*/
if (zfs_prop_readonly(prop))
return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));
if (!zfs_prop_inheritable(prop) && !received)
return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));
/*
* Check to see if the value applies to this type
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));
/*
* Normalize the name, to get rid of shorthand abbreviations.
*/
propname = zfs_prop_to_name(prop);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Determine datasets which will be affected by this change, if any.
*/
if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
return (-1);
if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) {
return (zfs_standard_error(hdl, errno, errbuf));
} else {
if ((ret = changelist_postfix(cl)) != 0)
goto error;
/*
* Refresh the statistics so the new property is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
error:
changelist_free(cl);
return (ret);
}
/*
* True DSL properties are stored in an nvlist. The following two functions
* extract them appropriately.
*/
uint64_t
getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
uint64_t value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
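/*
 * The property is not present in the nvlist: fall back to its
 * default value and report an empty (default) source.
 */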
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_numeric(prop);
*source = "";
}
return (value);
}
static const char *
getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
const char *value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
value = fnvlist_lookup_string(nv, ZPROP_VALUE);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_string(prop);
*source = "";
}
return (value);
}
static boolean_t
zfs_is_recvd_props_mode(zfs_handle_t *zhp)
{
return (zhp->zfs_props == zhp->zfs_recvd_props);
}
static void
zfs_set_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
*cookie = (uint64_t)(uintptr_t)zhp->zfs_props;
zhp->zfs_props = zhp->zfs_recvd_props;
}
static void
zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
zhp->zfs_props = (nvlist_t *)(uintptr_t)*cookie;
*cookie = 0;
}
/*
* Internal function for getting a numeric property. Both zfs_prop_get() and
* zfs_prop_get_int() are built using this interface.
*
* Certain properties can be overridden using 'mount -o'. In this case, scan
* the contents of the /proc/self/mounts entry, searching for the
* appropriate options. If they differ from the on-disk values, report the
* current values and mark the source "temporary".
*/
static int
get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
char **source, uint64_t *val)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zplprops = NULL;
struct mnttab mnt;
char *mntopt_on = NULL;
char *mntopt_off = NULL;
boolean_t received = zfs_is_recvd_props_mode(zhp);
*source = NULL;
/*
* If the property is being fetched for a snapshot, check whether
* the property is valid for the snapshot's head dataset type.
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT &&
!zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
*val = zfs_prop_default_numeric(prop);
return (-1);
}
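/*
 * Map the property to its corresponding mount option names so that
 * temporary 'mount -o' overrides can be detected below.
 */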
switch (prop) {
case ZFS_PROP_ATIME:
mntopt_on = MNTOPT_ATIME;
mntopt_off = MNTOPT_NOATIME;
break;
case ZFS_PROP_RELATIME:
mntopt_on = MNTOPT_RELATIME;
mntopt_off = MNTOPT_NORELATIME;
break;
case ZFS_PROP_DEVICES:
mntopt_on = MNTOPT_DEVICES;
mntopt_off = MNTOPT_NODEVICES;
break;
case ZFS_PROP_EXEC:
mntopt_on = MNTOPT_EXEC;
mntopt_off = MNTOPT_NOEXEC;
break;
case ZFS_PROP_READONLY:
mntopt_on = MNTOPT_RO;
mntopt_off = MNTOPT_RW;
break;
case ZFS_PROP_SETUID:
mntopt_on = MNTOPT_SETUID;
mntopt_off = MNTOPT_NOSETUID;
break;
case ZFS_PROP_XATTR:
mntopt_on = MNTOPT_XATTR;
mntopt_off = MNTOPT_NOXATTR;
break;
case ZFS_PROP_NBMAND:
mntopt_on = MNTOPT_NBMAND;
mntopt_off = MNTOPT_NONBMAND;
break;
default:
break;
}
/*
* Because looking up the mount options is potentially expensive
* (iterating over all of /proc/self/mounts), we defer its
* calculation until we're looking up a property which requires
* its presence.
*/
if (!zhp->zfs_mntcheck &&
(mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0) {
zhp->zfs_mntopts = zfs_strdup(hdl,
entry.mnt_mntopts);
if (zhp->zfs_mntopts == NULL)
return (-1);
}
zhp->zfs_mntcheck = B_TRUE;
}
if (zhp->zfs_mntopts == NULL)
mnt.mnt_mntopts = "";
else
mnt.mnt_mntopts = zhp->zfs_mntopts;
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_READONLY:
case ZFS_PROP_SETUID:
#ifndef __FreeBSD__
case ZFS_PROP_XATTR:
#endif
case ZFS_PROP_NBMAND:
*val = getprop_uint64(zhp, prop, source);
if (received)
break;
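/*
 * If the mount options contradict the stored value, report the
 * in-effect value and mark the source as temporary.
 */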
if (hasmntopt(&mnt, mntopt_on) && !*val) {
*val = B_TRUE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
} else if (hasmntopt(&mnt, mntopt_off) && *val) {
*val = B_FALSE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
}
break;
case ZFS_PROP_CANMOUNT:
case ZFS_PROP_VOLSIZE:
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
*val = getprop_uint64(zhp, prop, source);
if (*source == NULL) {
/* not default, must be local */
*source = zhp->zfs_name;
}
break;
case ZFS_PROP_MOUNTED:
*val = (zhp->zfs_mntopts != NULL);
break;
case ZFS_PROP_NUMCLONES:
*val = zhp->zfs_dmustats.dds_num_clones;
break;
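/*
 * The ZPL properties below are not part of the dataset's property
 * nvlist; fetch them with a separate ZFS_IOC_OBJSET_ZPLPROPS ioctl
 * and read the value out of the returned nvlist.
 */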
case ZFS_PROP_VERSION:
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
case ZFS_PROP_CASE:
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
zcmd_free_nvlists(&zc);
if (prop == ZFS_PROP_VERSION &&
zhp->zfs_type == ZFS_TYPE_VOLUME)
*val = zfs_prop_default_numeric(prop);
return (-1);
}
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
val) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
nvlist_free(zplprops);
zcmd_free_nvlists(&zc);
break;
case ZFS_PROP_INCONSISTENT:
*val = zhp->zfs_dmustats.dds_inconsistent;
break;
case ZFS_PROP_REDACTED:
*val = zhp->zfs_dmustats.dds_redacted;
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
*val = getprop_uint64(zhp, prop, source);
/*
* If we tried to use a default value for a
* readonly property, it means that it was not
* present. Note this only applies to "truly"
* readonly properties, not set-once properties
* like volblocksize.
*/
if (zfs_prop_readonly(prop) &&
!zfs_prop_setonce(prop) &&
*source != NULL && (*source)[0] == '\0') {
*source = NULL;
return (-1);
}
break;
case PROP_TYPE_STRING:
default:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"cannot get non-numeric property"));
return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "internal error")));
}
}
return (0);
}
/*
* Calculate the source type, given the raw source string.
*/
static void
get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source,
char *statbuf, size_t statlen)
{
if (statbuf == NULL ||
srctype == NULL || *srctype == ZPROP_SRC_TEMPORARY) {
return;
}
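/*
 * A NULL source means no value, an empty source means the default
 * value, a source containing ZPROP_SOURCE_VAL_RECVD means a received
 * value, the dataset's own name means a local setting, and any other
 * name is the ancestor the value was inherited from.
 */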
if (source == NULL) {
*srctype = ZPROP_SRC_NONE;
} else if (source[0] == '\0') {
*srctype = ZPROP_SRC_DEFAULT;
} else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) {
*srctype = ZPROP_SRC_RECEIVED;
} else {
if (strcmp(source, zhp->zfs_name) == 0) {
*srctype = ZPROP_SRC_LOCAL;
} else {
(void) strlcpy(statbuf, source, statlen);
*srctype = ZPROP_SRC_INHERITED;
}
}
}
int
zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf,
size_t proplen, boolean_t literal)
{
zfs_prop_t prop;
int err = 0;
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (-1);
prop = zfs_name_to_prop(propname);
if (prop != ZPROP_INVAL) {
uint64_t cookie;
if (!nvlist_exists(zhp->zfs_recvd_props, propname))
return (-1);
zfs_set_recvd_props_mode(zhp, &cookie);
err = zfs_prop_get(zhp, prop, propbuf, proplen,
NULL, NULL, 0, literal);
zfs_unset_recvd_props_mode(zhp, &cookie);
} else {
nvlist_t *propval;
char *recvdval;
if (nvlist_lookup_nvlist(zhp->zfs_recvd_props,
propname, &propval) != 0)
return (-1);
verify(nvlist_lookup_string(propval, ZPROP_VALUE,
&recvdval) == 0);
(void) strlcpy(propbuf, recvdval, proplen);
}
return (err == 0 ? 0 : -1);
}
static int
get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
nvpair_t *pair;
value = zfs_get_clones_nvl(zhp);
if (value == NULL || nvlist_empty(value))
return (-1);
propbuf[0] = '\0';
for (pair = nvlist_next_nvpair(value, NULL); pair != NULL;
pair = nvlist_next_nvpair(value, pair)) {
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) strlcat(propbuf, nvpair_name(pair), proplen);
}
return (0);
}
struct get_clones_arg {
uint64_t numclones;
nvlist_t *value;
const char *origin;
char buf[ZFS_MAX_DATASET_NAME_LEN];
};
static int
get_clones_cb(zfs_handle_t *zhp, void *arg)
{
struct get_clones_arg *gca = arg;
if (gca->numclones == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf),
NULL, NULL, 0, B_TRUE) != 0)
goto out;
if (strcmp(gca->buf, gca->origin) == 0) {
fnvlist_add_boolean(gca->value, zfs_get_name(zhp));
gca->numclones--;
}
out:
(void) zfs_iter_children(zhp, get_clones_cb, gca);
zfs_close(zhp);
return (0);
}
nvlist_t *
zfs_get_clones_nvl(zfs_handle_t *zhp)
{
nvlist_t *nv, *value;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) {
struct get_clones_arg gca;
/*
* If this is a snapshot, then the kernel wasn't able
* to get the clones. Do it by slowly iterating.
*/
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT)
return (NULL);
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) {
nvlist_free(nv);
return (NULL);
}
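/*
 * Walk the whole pool from its root filesystem, collecting every
 * dataset whose origin is this snapshot (see get_clones_cb()).
 */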
gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES);
gca.value = value;
gca.origin = zhp->zfs_name;
if (gca.numclones != 0) {
zfs_handle_t *root;
char pool[ZFS_MAX_DATASET_NAME_LEN];
char *cp = pool;
/* get the pool name */
(void) strlcpy(pool, zhp->zfs_name, sizeof (pool));
(void) strsep(&cp, "/@");
root = zfs_open(zhp->zfs_hdl, pool,
ZFS_TYPE_FILESYSTEM);
if (root == NULL) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
(void) get_clones_cb(root, &gca);
}
if (gca.numclones != 0 ||
nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 ||
nvlist_add_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
nvlist_free(nv);
nvlist_free(value);
verify(0 == nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv));
}
verify(nvlist_lookup_nvlist(nv, ZPROP_VALUE, &value) == 0);
return (value);
}
static int
get_rsnaps_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
uint64_t *snaps;
uint_t nsnaps;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &value) != 0)
return (-1);
if (nvlist_lookup_uint64_array(value, ZPROP_VALUE, &snaps,
&nsnaps) != 0)
return (-1);
if (nsnaps == 0) {
/* There are no redaction snapshots; pass a special value back */
(void) snprintf(propbuf, proplen, "none");
return (0);
}
propbuf[0] = '\0';
for (int i = 0; i < nsnaps; i++) {
char buf[128];
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) snprintf(buf, sizeof (buf), "%llu",
(u_longlong_t)snaps[i]);
(void) strlcat(propbuf, buf, proplen);
}
return (0);
}
/*
* Accepts a property and value and checks that the value
* matches the one found by the channel program. If they are
* not equal, print both of them.
*/
static void
zcp_check(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t intval,
const char *strval)
{
if (!zhp->zfs_hdl->libzfs_prop_debug)
return;
int error;
char *poolname = zhp->zpool_hdl->zpool_name;
const char *prop_name = zfs_prop_to_name(prop);
const char *program =
"args = ...\n"
"ds = args['dataset']\n"
"prop = args['property']\n"
"value, setpoint = zfs.get_prop(ds, prop)\n"
"return {value=value, setpoint=setpoint}\n";
nvlist_t *outnvl;
nvlist_t *retnvl;
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string(argnvl, "dataset", zhp->zfs_name);
fnvlist_add_string(argnvl, "property", zfs_prop_to_name(prop));
error = lzc_channel_program_nosync(poolname, program,
10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);
if (error == 0) {
retnvl = fnvlist_lookup_nvlist(outnvl, "return");
if (zfs_prop_get_type(prop) == PROP_TYPE_NUMBER) {
int64_t ans;
error = nvlist_lookup_int64(retnvl, "value", &ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (ans != intval) {
(void) fprintf(stderr, "%s: zfs found %llu, "
"but zcp found %llu\n", prop_name,
(u_longlong_t)intval, (u_longlong_t)ans);
}
} else {
char *str_ans;
error = nvlist_lookup_string(retnvl, "value", &str_ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (strcmp(strval, str_ans) != 0) {
(void) fprintf(stderr,
"%s: zfs found '%s', but zcp found '%s'\n",
prop_name, strval, str_ans);
}
}
} else {
(void) fprintf(stderr, "%s: zcp check failed, channel program "
"error: %u\n", prop_name, error);
}
nvlist_free(argnvl);
nvlist_free(outnvl);
}
/*
* Retrieve a property from the given object. If 'literal' is specified, then
* numbers are left as exact values. Otherwise, numbers are converted to a
* human-readable form.
*
* Returns 0 on success, or -1 on error.
*/
int
zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
{
char *source = NULL;
uint64_t val;
const char *str;
const char *strval;
boolean_t received = zfs_is_recvd_props_mode(zhp);
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (-1);
if (received && zfs_prop_readonly(prop))
return (-1);
if (src)
*src = ZPROP_SRC_NONE;
switch (prop) {
case ZFS_PROP_CREATION:
/*
* 'creation' is a time_t stored in the statistics. We convert
* this into a string unless 'literal' is specified.
*/
{
val = getprop_uint64(zhp, prop, &source);
time_t time = (time_t)val;
struct tm t;
if (literal ||
localtime_r(&time, &t) == NULL ||
strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
&t) == 0)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_MOUNTPOINT:
/*
* Getting the precise mountpoint can be tricky.
*
* - for 'none' or 'legacy', return those values.
* - for inherited mountpoints, we want to take everything
* after our ancestor and append it to the inherited value.
*
* If the pool has an alternate root, we want to prepend that
* root to any values we return.
*/
str = getprop_string(zhp, prop, &source);
if (str[0] == '/') {
char buf[MAXPATHLEN];
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
relpath = zhp->zfs_name + strlen(source);
if (relpath[0] == '/')
relpath++;
}
if ((zpool_get_prop(zhp->zpool_hdl,
ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL,
B_FALSE)) || (strcmp(root, "-") == 0))
root[0] = '\0';
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
if (str[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
str++;
if (relpath[0] == '\0')
(void) snprintf(propbuf, proplen, "%s%s",
root, str);
else
(void) snprintf(propbuf, proplen, "%s%s%s%s",
root, str, relpath[0] == '@' ? "" : "/",
relpath);
} else {
/* 'legacy' or 'none' */
(void) strlcpy(propbuf, str, proplen);
}
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_ORIGIN:
str = getprop_string(zhp, prop, &source);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case ZFS_PROP_REDACT_SNAPS:
if (get_rsnaps_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_CLONES:
if (get_clones_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If quota or reservation is 0, we translate this into 'none'
* (unless literal is set), and indicate that it's the default
* value. Otherwise, we print the number nicely and indicate
* that it's set locally.
*/
if (val == 0) {
if (literal)
(void) strlcpy(propbuf, "0", proplen);
else
(void) strlcpy(propbuf, "none", proplen);
} else {
if (literal)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
else
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If limit is UINT64_MAX, we translate this into 'none' (unless
* literal is set), and indicate that it's the default value.
* Otherwise, we print the number nicely and indicate that it's
* set locally.
*/
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else if (val == UINT64_MAX) {
(void) strlcpy(propbuf, "none", proplen);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFRATIO:
case ZFS_PROP_COMPRESSRATIO:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal)
(void) snprintf(propbuf, proplen, "%llu.%02llu",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
else
(void) snprintf(propbuf, proplen, "%llu.%02llux",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_TYPE:
switch (zhp->zfs_type) {
case ZFS_TYPE_FILESYSTEM:
str = "filesystem";
break;
case ZFS_TYPE_VOLUME:
str = "volume";
break;
case ZFS_TYPE_SNAPSHOT:
str = "snapshot";
break;
case ZFS_TYPE_BOOKMARK:
str = "bookmark";
break;
default:
abort();
}
(void) snprintf(propbuf, proplen, "%s", str);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MOUNTED:
/*
* The 'mounted' property is a pseudo-property that describes
* whether the filesystem is currently mounted. Even though
* it's a boolean value, the typical values of "on" and "off"
* don't make sense, so we translate to "yes" and "no".
*/
if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
src, &source, &val) != 0)
return (-1);
if (val)
(void) strlcpy(propbuf, "yes", proplen);
else
(void) strlcpy(propbuf, "no", proplen);
break;
case ZFS_PROP_NAME:
/*
* The 'name' property is a pseudo-property derived from the
* dataset name. It is presented as a real property to simplify
* consumers.
*/
(void) strlcpy(propbuf, zhp->zfs_name, proplen);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
m_label_t *new_sl = NULL;
char *ascii = NULL; /* human readable label */
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
if (literal || (strcasecmp(propbuf,
ZFS_MLSLABEL_DEFAULT) == 0))
break;
/*
* Try to translate the internal hex string to
* human-readable output. If there are any
* problems, just use the hex string.
*/
if (str_to_label(propbuf, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1) {
m_label_free(new_sl);
break;
}
if (label_to_str(new_sl, &ascii, M_LABEL,
DEF_NAMES) != 0) {
if (ascii)
free(ascii);
m_label_free(new_sl);
break;
}
m_label_free(new_sl);
(void) strlcpy(propbuf, ascii, proplen);
free(ascii);
#else
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
#endif /* HAVE_MLSLABEL */
}
break;
case ZFS_PROP_GUID:
case ZFS_PROP_CREATETXG:
case ZFS_PROP_OBJSETID:
case ZFS_PROP_PBKDF2_ITERS:
/*
* These properties are stored as numbers, but they are
* identifiers or counters.
* We don't want them to be pretty printed, because pretty
* printing truncates their values, making them useless.
*/
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
(void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val);
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFERENCED:
case ZFS_PROP_AVAILABLE:
case ZFS_PROP_USED:
case ZFS_PROP_USEDSNAP:
case ZFS_PROP_USEDDS:
case ZFS_PROP_USEDREFRESERV:
case ZFS_PROP_USEDCHILD:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0) {
return (-1);
}
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case PROP_TYPE_STRING:
str = getprop_string(zhp, prop, &source);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case PROP_TYPE_INDEX:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0)
return (-1);
if (zfs_prop_index_to_string(prop, val, &strval) != 0)
return (-1);
(void) strlcpy(propbuf, strval, proplen);
zcp_check(zhp, prop, 0, strval);
break;
default:
abort();
}
}
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
/*
* Utility function to get the given numeric property. Does no validation that
* the given property is the appropriate type; should only be used with
* hard-coded property types.
*/
uint64_t
zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
{
char *source;
uint64_t val = 0;
(void) get_numeric_property(zhp, prop, NULL, &source, &val);
return (val);
}
static int
zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
{
char buf[64];
(void) snprintf(buf, sizeof (buf), "%llu", (longlong_t)val);
return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
}
/*
* Similar to zfs_prop_get(), but returns the value as an integer.
*/
int
zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
zprop_source_t *src, char *statbuf, size_t statlen)
{
char *source;
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) {
return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
zfs_prop_to_name(prop)));
}
if (src)
*src = ZPROP_SRC_NONE;
if (get_numeric_property(zhp, prop, src, &source, value) != 0)
return (-1);
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
#ifdef HAVE_IDMAP
static int
idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser,
char **domainp, idmap_rid_t *ridp)
{
idmap_get_handle_t *get_hdl = NULL;
idmap_stat status;
int err = EINVAL;
if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS)
goto out;
if (isuser) {
err = idmap_get_sidbyuid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
} else {
err = idmap_get_sidbygid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
}
if (err == IDMAP_SUCCESS &&
idmap_get_mappings(get_hdl) == IDMAP_SUCCESS &&
status == IDMAP_SUCCESS)
err = 0;
else
err = EINVAL;
out:
if (get_hdl)
idmap_get_destroy(get_hdl);
return (err);
}
#endif /* HAVE_IDMAP */
/*
* Convert the propname into the parameters needed by the kernel.
* Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829
* Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789
* Eg: groupquota@staff -> ZFS_PROP_GROUPQUOTA, "", 1234
* Eg: groupused@staff -> ZFS_PROP_GROUPUSED, "", 1234
* Eg: projectquota@123 -> ZFS_PROP_PROJECTQUOTA, "", 123
* Eg: projectused@789 -> ZFS_PROP_PROJECTUSED, "", 789
*/
static int
userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp)
{
zfs_userquota_prop_t type;
char *cp;
boolean_t isuser;
boolean_t isgroup;
boolean_t isproject;
struct passwd *pw;
struct group *gr;
domain[0] = '\0';
/* Figure out the property type ({user|group|project}{quota|space}) */
for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) {
if (strncmp(propname, zfs_userquota_prop_prefixes[type],
strlen(zfs_userquota_prop_prefixes[type])) == 0)
break;
}
if (type == ZFS_NUM_USERQUOTA_PROPS)
return (EINVAL);
*typep = type;
isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_USEROBJUSED);
isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_GROUPOBJUSED);
isproject = (type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED);
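/* Everything after the '@' names the user, group, or project. */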
cp = strchr(propname, '@') + 1;
if (isuser && (pw = getpwnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = pw->pw_uid;
} else if (isgroup && (gr = getgrnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = gr->gr_gid;
} else if (!isproject && strchr(cp, '@')) {
#ifdef HAVE_IDMAP
/*
* It's a SID name (eg "user@domain") that needs to be
* turned into S-1-domainID-RID.
*/
directory_error_t e;
char *numericsid = NULL;
char *end;
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
if (isuser) {
e = directory_sid_from_user_name(NULL,
cp, &numericsid);
} else {
e = directory_sid_from_group_name(NULL,
cp, &numericsid);
}
if (e != NULL) {
directory_error_free(e);
return (ENOENT);
}
if (numericsid == NULL)
return (ENOENT);
cp = numericsid;
(void) strlcpy(domain, cp, domainlen);
cp = strrchr(domain, '-');
*cp = '\0';
cp++;
errno = 0;
*ridp = strtoull(cp, &end, 10);
free(numericsid);
if (errno != 0 || *end != '\0')
return (EINVAL);
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
/* It's a user/group/project ID (eg "12345"). */
uid_t id;
char *end;
id = strtoul(cp, &end, 10);
if (*end != '\0')
return (EINVAL);
if (id > MAXUID && !isproject) {
#ifdef HAVE_IDMAP
/* It's an ephemeral ID. */
idmap_rid_t rid;
char *mapdomain;
if (idmap_id_to_numeric_domain_rid(id, isuser,
&mapdomain, &rid) != 0)
return (ENOENT);
(void) strlcpy(domain, mapdomain, domainlen);
*ridp = rid;
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
*ridp = id;
}
}
return (0);
}
static int
zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue, zfs_userquota_prop_t *typep)
{
int err;
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
err = userquota_propname_decode(propname,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED),
typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid);
zc.zc_objset_type = *typep;
if (err)
return (err);
err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_USERSPACE_ONE, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
zfs_userquota_prop_t type;
return (zfs_prop_get_userquota_common(zhp, propname, propvalue,
&type));
}
int
zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
zfs_userquota_prop_t type;
err = zfs_prop_get_userquota_common(zhp, propname, &propvalue,
&type);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else if (propvalue == 0 &&
(type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTOBJQUOTA)) {
(void) strlcpy(propbuf, "none", proplen);
} else if (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USERUSED || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(propvalue, propbuf, proplen);
} else {
zfs_nicenum(propvalue, propbuf, proplen);
}
return (0);
}
/*
* propname must start with "written@" or "written#".
*/
int
zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
int err;
zfs_cmd_t zc = {"\0"};
const char *snapname;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
assert(zfs_prop_written(propname));
snapname = propname + strlen("written@");
if (strchr(snapname, '@') != NULL || strchr(snapname, '#') != NULL) {
/* full snapshot or bookmark name specified */
(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
} else {
/* snapname is the short name, append it to zhp's fsname */
char *cp;
(void) strlcpy(zc.zc_value, zhp->zfs_name,
sizeof (zc.zc_value));
cp = strchr(zc.zc_value, '@');
if (cp != NULL)
*cp = '\0';
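/* snapname - 1 backs up to include the leading '@' or '#' separator. */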
(void) strlcat(zc.zc_value, snapname - 1, sizeof (zc.zc_value));
}
err = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SPACE_WRITTEN, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
err = zfs_prop_get_written_int(zhp, propname, &propvalue);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else {
zfs_nicebytes(propvalue, propbuf, proplen);
}
return (0);
}
/*
* Returns the name of the given zfs handle.
*/
const char *
zfs_get_name(const zfs_handle_t *zhp)
{
return (zhp->zfs_name);
}
/*
* Returns the name of the parent pool for the given zfs handle.
*/
const char *
zfs_get_pool_name(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl->zpool_name);
}
/*
* Returns the type of the given zfs handle.
*/
zfs_type_t
zfs_get_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_type);
}
/*
* Returns the type of the given zfs handle,
* or, if a snapshot, the type of the snapshotted dataset.
*/
zfs_type_t
zfs_get_underlying_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_head_type);
}
/*
* Is one dataset name a child dataset of another?
*
* Needs to handle these cases:
* Dataset 1 "a/foo" "a/foo" "a/foo" "a/foo"
* Dataset 2 "a/fo" "a/foobar" "a/bar/baz" "a/foo/bar"
* Descendant? No. No. No. Yes.
*/
static boolean_t
is_descendant(const char *ds1, const char *ds2)
{
size_t d1len = strlen(ds1);
/* ds2 can't be a descendant if it's smaller */
if (strlen(ds2) < d1len)
return (B_FALSE);
/* otherwise, compare strings and verify that there's a '/' char */
return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0));
}
/*
* Given a complete name, return just the portion that refers to the parent.
* Will return -1 if there is no parent (path is just the name of the
* pool).
*/
static int
parent_name(const char *path, char *buf, size_t buflen)
{
char *slashp;
(void) strlcpy(buf, path, buflen);
if ((slashp = strrchr(buf, '/')) == NULL)
return (-1);
*slashp = '\0';
return (0);
}
int
zfs_parent_name(zfs_handle_t *zhp, char *buf, size_t buflen)
{
return (parent_name(zfs_get_name(zhp), buf, buflen));
}
/*
* If accept_ancestor is false, then check to make sure that the given path has
* a parent, and that it exists. If accept_ancestor is true, then find the
* closest existing ancestor for the given path. In prefixlen return the
* length of already existing prefix of the given path. We also fetch the
* 'zoned' property, which is used to validate property settings when creating
* new datasets.
*/
static int
check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
boolean_t accept_ancestor, int *prefixlen)
{
zfs_cmd_t zc = {"\0"};
char parent[ZFS_MAX_DATASET_NAME_LEN];
char *slash;
zfs_handle_t *zhp;
char errbuf[1024];
uint64_t is_zoned;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot create '%s'"), path);
/* get parent, and check to see if this is just a pool */
if (parent_name(path, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* check to see if the pool exists */
if ((slash = strchr(parent, '/')) == NULL)
slash = parent + strlen(parent);
(void) strncpy(zc.zc_name, parent, slash - parent);
zc.zc_name[slash - parent] = '\0';
if (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* check to see if the parent dataset exists */
while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
if (errno == ENOENT && accept_ancestor) {
/*
* Go deeper to find an ancestor; give up at the top level.
*/
if (parent_name(parent, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
} else if (errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent does not exist"));
return (zfs_error(hdl, EZFS_NOENT, errbuf));
} else
return (zfs_standard_error(hdl, errno, errbuf));
}
is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned != NULL)
*zoned = is_zoned;
/* we are in a non-global zone, but parent is in the global zone */
if (getzoneid() != GLOBAL_ZONEID && !is_zoned) {
(void) zfs_standard_error(hdl, EPERM, errbuf);
zfs_close(zhp);
return (-1);
}
/* make sure parent is a filesystem */
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent is not a filesystem"));
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (-1);
}
zfs_close(zhp);
if (prefixlen != NULL)
*prefixlen = strlen(parent);
return (0);
}
/*
* Finds whether the dataset of the given type(s) exists.
*/
boolean_t
zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
{
zfs_handle_t *zhp;
if (!zfs_validate_name(hdl, path, types, B_FALSE))
return (B_FALSE);
/*
* Try to get stats for the dataset, which will tell us if it exists.
*/
if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
int ds_type = zhp->zfs_type;
zfs_close(zhp);
if (types & ds_type)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Given a path to 'target', create all the ancestors between
* the prefixlen portion of the path, and the target itself.
* Fail if the initial prefixlen-ancestor does not already exist.
*/
int
create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
{
zfs_handle_t *h;
char *cp;
const char *opname;
/* make sure prefix exists */
cp = target + prefixlen;
if (*cp != '/') {
assert(strchr(cp, '/') == NULL);
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
} else {
*cp = '\0';
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
*cp = '/';
}
if (h == NULL)
return (-1);
zfs_close(h);
/*
* Attempt to create, mount, and share any ancestor filesystems,
* up to the prefixlen-long one.
*/
for (cp = target + prefixlen + 1;
(cp = strchr(cp, '/')) != NULL; *cp = '/', cp++) {
*cp = '\0';
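/*
 * Temporarily truncate the path at this component; the '/' is
 * restored by the loop increment.
 */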
h = make_dataset_handle(hdl, target);
if (h) {
/* it already exists, nothing to do here */
zfs_close(h);
continue;
}
if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
NULL) != 0) {
opname = dgettext(TEXT_DOMAIN, "create");
goto ancestorerr;
}
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
if (h == NULL) {
opname = dgettext(TEXT_DOMAIN, "open");
goto ancestorerr;
}
if (zfs_mount(h, NULL, 0) != 0) {
opname = dgettext(TEXT_DOMAIN, "mount");
goto ancestorerr;
}
if (zfs_share(h) != 0) {
opname = dgettext(TEXT_DOMAIN, "share");
goto ancestorerr;
}
zfs_close(h);
}
zfs_commit_all_shares();
return (0);
ancestorerr:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to %s ancestor '%s'"), opname, target);
return (-1);
}
/*
* Creates non-existing ancestors of the given path.
*/
int
zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
{
int prefix;
char *path_copy;
char errbuf[1024];
int rc = 0;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/*
* Check that we are not exceeding the nesting limit
* before we start creating any ancestors.
*/
if (dataset_nestcheck(path) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"maximum name nesting depth exceeded"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0)
return (-1);
if ((path_copy = strdup(path)) != NULL) {
rc = create_parents(hdl, path_copy, prefix);
free(path_copy);
}
if (path_copy == NULL || rc != 0)
return (-1);
return (0);
}
/*
* Create a new filesystem or volume.
*/
int
zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
nvlist_t *props)
{
int ret;
uint64_t size = 0;
uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
uint64_t zoned;
enum lzc_dataset_type ost;
zpool_handle_t *zpool_handle;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char errbuf[1024];
char parent[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/* validate the path, taking care to note the extended error message */
if (!zfs_validate_name(hdl, path, type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
if (dataset_nestcheck(path) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"maximum name nesting depth exceeded"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* validate parents exist */
if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
return (-1);
/*
* The failure modes when creating a dataset of a different type over
* one that already exists are a little strange. In particular, if you
* try to create a dataset on top of an existing dataset, the ioctl()
* will return ENOENT, not EEXIST. To prevent this from happening, we
* first try to see if the dataset exists.
*/
if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
}
if (type == ZFS_TYPE_VOLUME)
ost = LZC_DATSET_TYPE_ZVOL;
else
ost = LZC_DATSET_TYPE_ZFS;
/* open zpool handle for prop validation */
char pool_path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(pool_path, path, sizeof (pool_path));
/* truncate pool_path at first slash */
char *p = strchr(pool_path, '/');
if (p != NULL)
*p = '\0';
if ((zpool_handle = zpool_open(hdl, pool_path)) == NULL)
return (-1);
if (props && (props = zfs_valid_proplist(hdl, type, props,
zoned, NULL, zpool_handle, B_TRUE, errbuf)) == 0) {
zpool_close(zpool_handle);
return (-1);
}
zpool_close(zpool_handle);
if (type == ZFS_TYPE_VOLUME) {
/*
* If we are creating a volume, the size and block size must
* satisfy a few constraints. First, the blocksize must be a
* valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the
* volsize must be a multiple of the block size, and cannot be
* zero.
*/
if (props == NULL || nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if ((ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&blocksize)) != 0) {
if (ret == ENOENT) {
blocksize = zfs_prop_default_numeric(
ZFS_PROP_VOLBLOCKSIZE);
} else {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume block size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
if (size == 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size cannot be zero"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if (size % blocksize != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size must be a multiple of volume block "
"size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
(void) parent_name(path, parent, sizeof (parent));
if (zfs_crypto_create(hdl, parent, props, NULL, B_TRUE,
&wkeydata, &wkeylen) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
/* create the dataset */
ret = lzc_create(path, ost, props, wkeydata, wkeylen);
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
/* check for failure */
if (ret != 0) {
switch (errno) {
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to set this "
"property or value"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption root's key is not loaded "
"or provided"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ERANGE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property value(s) specified"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
#ifdef _ILP32
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
if (type == ZFS_TYPE_VOLUME)
return (zfs_error(hdl, EZFS_VOLTOOBIG,
errbuf));
+ fallthrough;
#endif
- /* FALLTHROUGH */
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (0);
}
/*
* Destroys the given dataset. The caller must make sure that the filesystem
* isn't mounted, and that there are no active dependents. If the filesystem
* does not exist, this function does nothing.
*/
int
zfs_destroy(zfs_handle_t *zhp, boolean_t defer)
{
int error;
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT && defer)
return (EINVAL);
if (zhp->zfs_type == ZFS_TYPE_BOOKMARK) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
error = lzc_destroy_bookmarks(nv, NULL);
fnvlist_free(nv);
if (error != 0) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, error,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
return (0);
}
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
error = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
error = lzc_destroy(zhp->zfs_name);
}
if (error != 0 && error != ENOENT) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
remove_mountpoint(zhp);
return (0);
}
struct destroydata {
nvlist_t *nvl;
const char *snapname;
};
static int
zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
{
struct destroydata *dd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
dd->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
verify(nvlist_add_boolean(dd->nvl, name) == 0);
rv = zfs_iter_filesystems(zhp, zfs_check_snap_cb, dd);
zfs_close(zhp);
return (rv);
}
/*
* Destroys all snapshots with the given name in zhp & descendants.
*/
int
zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer)
{
int ret;
struct destroydata dd = { 0 };
dd.snapname = snapname;
verify(nvlist_alloc(&dd.nvl, NV_UNIQUE_NAME, 0) == 0);
(void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd);
if (nvlist_empty(dd.nvl)) {
ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
zhp->zfs_name, snapname);
} else {
ret = zfs_destroy_snaps_nvl(zhp->zfs_hdl, dd.nvl, defer);
}
nvlist_free(dd.nvl);
return (ret);
}
/*
* Destroys all the snapshots named in the nvlist.
*/
int
zfs_destroy_snaps_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, boolean_t defer)
{
int ret;
nvlist_t *errlist = NULL;
nvpair_t *pair;
ret = lzc_destroy_snaps(snaps, defer, &errlist);
if (ret == 0) {
nvlist_free(errlist);
return (0);
}
if (nvlist_empty(errlist)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshots"));
ret = zfs_standard_error(hdl, ret, errbuf);
}
for (pair = nvlist_next_nvpair(errlist, NULL);
pair != NULL; pair = nvlist_next_nvpair(errlist, pair)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshot %s"),
nvpair_name(pair));
switch (fnvpair_value_int32(pair)) {
case EEXIST:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "snapshot is cloned"));
ret = zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
ret = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
nvlist_free(errlist);
return (ret);
}
/*
* Clones the given dataset. The target must be of the same type as the source.
*/
int
zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
{
char parent[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t zoned;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), target);
/* validate the target/clone name */
if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
return (-1);
(void) parent_name(target, parent, sizeof (parent));
/* do the clone */
if (props) {
zfs_type_t type;
if (ZFS_IS_VOLUME(zhp)) {
type = ZFS_TYPE_VOLUME;
} else {
type = ZFS_TYPE_FILESYSTEM;
}
if ((props = zfs_valid_proplist(hdl, type, props, zoned,
zhp, zhp->zpool_hdl, B_TRUE, errbuf)) == NULL)
return (-1);
if (zfs_fix_auto_resv(zhp, props) == -1) {
nvlist_free(props);
return (-1);
}
}
if (zfs_crypto_clone_check(hdl, zhp, parent, props) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
ret = lzc_clone(target, zhp->zfs_name, props);
nvlist_free(props);
if (ret != 0) {
switch (errno) {
case ENOENT:
/*
* The parent doesn't exist. We should have caught this
* above, but there may be a race condition that has since
* destroyed the parent.
*
* At this point, we don't know whether it's the source
* that doesn't exist anymore, or whether the target
* dataset doesn't exist.
*/
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
case EXDEV:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"source and target pools differ"));
return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
errbuf));
default:
return (zfs_standard_error(zhp->zfs_hdl, errno,
errbuf));
}
}
return (ret);
}
/*
* Promotes the given clone fs to be the clone parent.
*/
int
zfs_promote(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot promote '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots can not be promoted"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (zhp->zfs_dmustats.dds_origin[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a cloned filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
ret = lzc_promote(zhp->zfs_name, snapname, sizeof (snapname));
if (ret != 0) {
switch (ret) {
case EACCES:
/*
* Promoting encrypted dataset outside its
* encryption root.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot promote dataset outside its "
"encryption root"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
case EEXIST:
/* There is a conflicting snapshot name. */
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"conflicting snapshot '%s' from parent '%s'"),
snapname, zhp->zfs_dmustats.dds_origin);
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
default:
return (zfs_standard_error(hdl, ret, errbuf));
}
}
return (ret);
}
typedef struct snapdata {
nvlist_t *sd_nvl;
const char *sd_snapname;
} snapdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snapdata_t *sd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) == 0) {
if (snprintf(name, sizeof (name), "%s@%s", zfs_get_name(zhp),
sd->sd_snapname) >= sizeof (name))
return (EINVAL);
fnvlist_add_boolean(sd->sd_nvl, name);
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
}
zfs_close(zhp);
return (rv);
}
/*
* Creates snapshots. The keys in the snaps nvlist are the snapshots to be
* created.
*/
int
zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props)
{
int ret;
char errbuf[1024];
nvpair_t *elem;
nvlist_t *errors;
zpool_handle_t *zpool_hdl;
char pool[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create snapshots "));
elem = NULL;
while ((elem = nvlist_next_nvpair(snaps, elem)) != NULL) {
const char *snapname = nvpair_name(elem);
/* validate the target name */
if (!zfs_validate_name(hdl, snapname, ZFS_TYPE_SNAPSHOT,
B_TRUE)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), snapname);
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
/*
* get pool handle for prop validation. assumes all snaps are in the
* same pool, as does lzc_snapshot (below).
*/
elem = nvlist_next_nvpair(snaps, NULL);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
zpool_hdl = zpool_open(hdl, pool);
if (zpool_hdl == NULL)
return (-1);
if (props != NULL &&
(props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
props, B_FALSE, NULL, zpool_hdl, B_FALSE, errbuf)) == NULL) {
zpool_close(zpool_hdl);
return (-1);
}
zpool_close(zpool_hdl);
ret = lzc_snapshot(snaps, props, &errors);
if (ret != 0) {
boolean_t printed = B_FALSE;
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), nvpair_name(elem));
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
printed = B_TRUE;
}
if (!printed) {
switch (ret) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple snapshots of same "
"fs not allowed"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
}
nvlist_free(props);
nvlist_free(errors);
return (ret);
}
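/*
 * A minimal usage sketch for zfs_snapshot_nvl() (dataset names are
 * hypothetical): the snaps nvlist is keyed by full snapshot names, all of
 * which must live in the same pool; the values are ignored boolean markers.
 */
static int
example_snapshot_two_filesystems(libzfs_handle_t *hdl)
{
	nvlist_t *snaps = fnvlist_alloc();
	int err;

	fnvlist_add_boolean(snaps, "tank/home@backup");
	fnvlist_add_boolean(snaps, "tank/projects@backup");
	err = zfs_snapshot_nvl(hdl, snaps, NULL);	/* no snapshot props */
	fnvlist_free(snaps);
	return (err);
}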
int
zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
nvlist_t *props)
{
int ret;
snapdata_t sd = { 0 };
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *cp;
zfs_handle_t *zhp;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot snapshot %s"), path);
if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
(void) strlcpy(fsname, path, sizeof (fsname));
cp = strchr(fsname, '@');
*cp = '\0';
sd.sd_snapname = cp + 1;
if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
return (-1);
}
verify(nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) == 0);
if (recursive) {
(void) zfs_snapshot_cb(zfs_handle_dup(zhp), &sd);
} else {
fnvlist_add_boolean(sd.sd_nvl, path);
}
ret = zfs_snapshot_nvl(hdl, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
zfs_close(zhp);
return (ret);
}
/*
 * Destroy any snapshots more recent than the rollback target. For each such
 * snapshot we first destroy its dependents (clones) via
 * rollback_destroy_dependent(), which unmounts and destroys them without
 * checking their transaction group, and then destroy the snapshot itself.
 */
typedef struct rollback_data {
const char *cb_target; /* the snapshot */
uint64_t cb_create; /* creation time reference */
boolean_t cb_error;
boolean_t cb_force;
} rollback_data_t;
static int
rollback_destroy_dependent(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
prop_changelist_t *clp;
/* We must destroy this clone; first unmount it */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
cbp->cb_force ? MS_FORCE: 0);
if (clp == NULL || changelist_prefix(clp) != 0) {
cbp->cb_error = B_TRUE;
zfs_close(zhp);
return (0);
}
if (zfs_destroy(zhp, B_FALSE) != 0)
cbp->cb_error = B_TRUE;
else
changelist_remove(clp, zhp->zfs_name);
(void) changelist_postfix(clp);
changelist_free(clp);
zfs_close(zhp);
return (0);
}
static int
rollback_destroy(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE,
rollback_destroy_dependent, cbp);
cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
}
zfs_close(zhp);
return (0);
}
/*
* Given a dataset, rollback to a specific snapshot, discarding any
* data changes since then and making it the active dataset.
*
* Any snapshots and bookmarks more recent than the target are
* destroyed, along with their dependents (i.e. clones).
*/
int
zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
{
rollback_data_t cb = { 0 };
int err;
boolean_t restore_resv = 0;
uint64_t old_volsize = 0, new_volsize;
zfs_prop_t resv_prop = { 0 };
uint64_t min_txg = 0;
assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
zhp->zfs_type == ZFS_TYPE_VOLUME);
/*
* Destroy all recent snapshots and their dependents.
*/
cb.cb_force = force;
cb.cb_target = snap->zfs_name;
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
if (cb.cb_create > 0)
min_txg = cb.cb_create;
(void) zfs_iter_snapshots(zhp, B_FALSE, rollback_destroy, &cb,
min_txg, 0);
(void) zfs_iter_bookmarks(zhp, rollback_destroy, &cb);
if (cb.cb_error)
return (-1);
/*
* Now that we have verified that the snapshot is the latest,
* rollback to the given snapshot.
*/
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
restore_resv =
(old_volsize == zfs_prop_get_int(zhp, resv_prop));
}
/*
* Pass both the filesystem and the wanted snapshot names so that
* we get an error back if the snapshot is destroyed or a new
* snapshot is created before this request is processed.
*/
err = lzc_rollback_to(zhp->zfs_name, snap->zfs_name);
if (err != 0) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
zhp->zfs_name);
switch (err) {
case EEXIST:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"there is a snapshot or bookmark more recent "
"than '%s'"), snap->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf);
break;
case ESRCH:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not found among snapshots of '%s'"),
snap->zfs_name, zhp->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf);
break;
case EINVAL:
(void) zfs_error(zhp->zfs_hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(zhp->zfs_hdl, err, errbuf);
}
return (err);
}
/*
* For volumes, if the pre-rollback volsize matched the pre-
* rollback reservation and the volsize has changed then set
* the reservation property to the post-rollback volsize.
* Make a new handle since the rollback closed the dataset.
*/
if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
(zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
if (restore_resv) {
new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (old_volsize != new_volsize)
err = zfs_prop_set_int(zhp, resv_prop,
new_volsize);
}
zfs_close(zhp);
}
return (err);
}
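/*
 * A minimal usage sketch for zfs_rollback() (names are hypothetical): open
 * both the filesystem and the target snapshot, then roll back, forcibly
 * unmounting any clones that have to be destroyed.
 */
static int
example_rollback(libzfs_handle_t *hdl)
{
	zfs_handle_t *fs, *snap;
	int err = -1;

	fs = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
	snap = zfs_open(hdl, "tank/home@known-good", ZFS_TYPE_SNAPSHOT);
	if (fs != NULL && snap != NULL)
		err = zfs_rollback(fs, snap, B_TRUE);
	if (snap != NULL)
		zfs_close(snap);
	if (fs != NULL)
		zfs_close(fs);
	return (err);
}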
/*
* Renames the given dataset.
*/
int
zfs_rename(zfs_handle_t *zhp, const char *target, renameflags_t flags)
{
int ret = 0;
zfs_cmd_t zc = {"\0"};
char *delim;
prop_changelist_t *cl = NULL;
char parent[ZFS_MAX_DATASET_NAME_LEN];
char property[ZFS_MAXPROPLEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
/* if we have the same exact name, just return success */
if (strcmp(zhp->zfs_name, target) == 0)
return (0);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename to '%s'"), target);
/* make sure source name is valid */
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/*
* Make sure the target name is valid
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
if ((strchr(target, '@') == NULL) ||
*target == '@') {
/*
* Snapshot target name is abbreviated;
* reconstruct the full dataset name
*/
(void) strlcpy(parent, zhp->zfs_name,
sizeof (parent));
delim = strchr(parent, '@');
if (strchr(target, '@') == NULL)
*(++delim) = '\0';
else
*delim = '\0';
(void) strlcat(parent, target, sizeof (parent));
target = parent;
} else {
/*
* Make sure we're renaming within the same dataset.
*/
delim = strchr(target, '@');
if (strncmp(zhp->zfs_name, target, delim - target)
!= 0 || zhp->zfs_name[delim - target] != '@') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots must be part of same "
"dataset"));
return (zfs_error(hdl, EZFS_CROSSTARGET,
errbuf));
}
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (flags.recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"recursive rename must be a snapshot"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents */
if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0)
return (-1);
/* make sure we're in the same pool */
verify((delim = strchr(target, '/')) != NULL);
if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
zhp->zfs_name[delim - target] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"datasets must be within same pool"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
/* new name cannot be a child of the current dataset name */
if (is_descendant(zhp->zfs_name, target)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"New dataset name cannot be a descendant of "
"current dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);
if (getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Avoid unmounting file systems with mountpoint property set to
* 'legacy' or 'none' even if -u option is not given.
*/
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
!flags.recursive && !flags.nounmount &&
zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, property,
sizeof (property), NULL, NULL, 0, B_FALSE) == 0 &&
(strcmp(property, "legacy") == 0 ||
strcmp(property, "none") == 0)) {
flags.nounmount = B_TRUE;
}
if (flags.recursive) {
char *parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
if (parentname == NULL) {
ret = -1;
goto error;
}
delim = strchr(parentname, '@');
*delim = '\0';
zfs_handle_t *zhrp = zfs_open(zhp->zfs_hdl, parentname,
ZFS_TYPE_DATASET);
free(parentname);
if (zhrp == NULL) {
ret = -1;
goto error;
}
zfs_close(zhrp);
} else if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) {
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME,
flags.nounmount ? CL_GATHER_DONT_UNMOUNT :
CL_GATHER_ITER_MOUNTED,
flags.forceunmount ? MS_FORCE : 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
ret = -1;
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
}
if (ZFS_IS_VOLUME(zhp))
zc.zc_objset_type = DMU_OST_ZVOL;
else
zc.zc_objset_type = DMU_OST_ZFS;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
zc.zc_cookie = !!flags.recursive;
zc.zc_cookie |= (!!flags.nounmount) << 1;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
/*
* if it was recursive, the one that actually failed will
* be in zc.zc_name
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename '%s'"), zc.zc_name);
if (flags.recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
} else if (errno == EACCES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot move encrypted child outside of "
"its encryption root"));
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
/*
* On failure, we still want to remount any filesystems that
* were previously mounted, so we don't alter the system state.
*/
if (cl != NULL)
(void) changelist_postfix(cl);
} else {
if (cl != NULL) {
changelist_rename(cl, zfs_get_name(zhp), target);
ret = changelist_postfix(cl);
}
}
error:
if (cl != NULL) {
changelist_free(cl);
}
return (ret);
}
nvlist_t *
zfs_get_all_props(zfs_handle_t *zhp)
{
return (zhp->zfs_props);
}
nvlist_t *
zfs_get_recvd_props(zfs_handle_t *zhp)
{
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (NULL);
return (zhp->zfs_recvd_props);
}
nvlist_t *
zfs_get_user_props(zfs_handle_t *zhp)
{
return (zhp->zfs_user_props);
}
/*
* This function is used by 'zfs list' to determine the exact set of columns to
* display, and their maximum widths. This does two main things:
*
* - If this is a list of all properties, then expand the list to include
* all native properties, and set a flag so that for each dataset we look
* for new unique user properties and add them to the list.
*
* - For non fixed-width properties, keep track of the maximum width seen
* so that we can size the column appropriately. If the user has
* requested received property values, we also need to compute the width
* of the RECEIVED column.
*/
int
zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received,
boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zprop_list_t *entry;
zprop_list_t **last, **start;
nvlist_t *userprops, *propval;
nvpair_t *elem;
char *strval;
char buf[ZFS_MAXPROPLEN];
if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
return (-1);
userprops = zfs_get_user_props(zhp);
entry = *plp;
if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
/*
* Go through and add any user properties as necessary. We
* start by incrementing our list pointer to the first
* non-native property.
*/
start = plp;
while (*start != NULL) {
if ((*start)->pl_prop == ZPROP_INVAL)
break;
start = &(*start)->pl_next;
}
elem = NULL;
while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
/*
* See if we've already found this property in our list.
*/
for (last = start; *last != NULL;
last = &(*last)->pl_next) {
if (strcmp((*last)->pl_user_prop,
nvpair_name(elem)) == 0)
break;
}
if (*last == NULL) {
if ((entry = zfs_alloc(hdl,
sizeof (zprop_list_t))) == NULL ||
((entry->pl_user_prop = zfs_strdup(hdl,
nvpair_name(elem)))) == NULL) {
free(entry);
return (-1);
}
entry->pl_prop = ZPROP_INVAL;
entry->pl_width = strlen(nvpair_name(elem));
entry->pl_all = B_TRUE;
*last = entry;
}
}
}
/*
* Now go through and check the width of any non-fixed columns
*/
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, entry->pl_prop,
buf, sizeof (buf), NULL, NULL, 0, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (received && zfs_prop_get_recvd(zhp,
zfs_prop_to_name(entry->pl_prop),
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
} else {
if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop,
&propval) == 0) {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
if (strlen(strval) > entry->pl_width)
entry->pl_width = strlen(strval);
}
if (received && zfs_prop_get_recvd(zhp,
entry->pl_user_prop,
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
}
}
return (0);
}
void
zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props)
{
nvpair_t *curr;
nvpair_t *next;
/*
* Keep a reference to the props-table against which we prune the
* properties.
*/
zhp->zfs_props_table = props;
curr = nvlist_next_nvpair(zhp->zfs_props, NULL);
while (curr) {
zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr));
next = nvlist_next_nvpair(zhp->zfs_props, curr);
/*
* User properties will result in ZPROP_INVAL, and since we
* only know how to prune standard ZFS properties, we always
* leave these in the list. This can also happen if we
* encounter an unknown DSL property (when running older
* software, for example).
*/
if (zfs_prop != ZPROP_INVAL && props[zfs_prop] == B_FALSE)
(void) nvlist_remove(zhp->zfs_props,
nvpair_name(curr), nvpair_type(curr));
curr = next;
}
}
static int
zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *nvlist = NULL;
int error;
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
zc.zc_cookie = (uint64_t)cmd;
if (cmd == ZFS_SMB_ACL_RENAME) {
if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (0);
}
}
switch (cmd) {
case ZFS_SMB_ACL_ADD:
case ZFS_SMB_ACL_REMOVE:
(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
break;
case ZFS_SMB_ACL_RENAME:
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
resource1) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
resource2) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (zcmd_write_src_nvlist(hdl, &zc, nvlist) != 0) {
nvlist_free(nvlist);
return (-1);
}
break;
case ZFS_SMB_ACL_PURGE:
break;
default:
return (-1);
}
error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
nvlist_free(nvlist);
return (error);
}
int
zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
resource, NULL));
}
int
zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
resource, NULL));
}
int
zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
NULL, NULL));
}
int
zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
char *oldname, char *newname)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
oldname, newname));
}
int
zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
zfs_userspace_cb_t func, void *arg)
{
zfs_cmd_t zc = {"\0"};
zfs_useracct_t buf[100];
libzfs_handle_t *hdl = zhp->zfs_hdl;
int ret;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_objset_type = type;
zc.zc_nvlist_dst = (uintptr_t)buf;
for (;;) {
zfs_useracct_t *zua = buf;
zc.zc_nvlist_dst_size = sizeof (buf);
if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) {
if ((errno == ENOTSUP &&
(type == ZFS_PROP_USEROBJUSED ||
type == ZFS_PROP_GROUPOBJUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED ||
type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTUSED ||
type == ZFS_PROP_PROJECTQUOTA)))
break;
return (zfs_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get used/quota for %s"), zc.zc_name));
}
if (zc.zc_nvlist_dst_size == 0)
break;
while (zc.zc_nvlist_dst_size > 0) {
if ((ret = func(arg, zua->zu_domain, zua->zu_rid,
zua->zu_space)) != 0)
return (ret);
zua++;
zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
}
}
return (0);
}
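/*
 * A minimal usage sketch for zfs_userspace() (output format is arbitrary and
 * stdio is assumed): the callback is invoked once per accounting entry, and
 * a non-zero return value stops the iteration.
 */
static int
example_userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
	(void) arg;
	(void) printf("%s\t%u\t%llu\n", domain, (unsigned int)rid,
	    (u_longlong_t)space);
	return (0);
}
/* Typical call: zfs_userspace(zhp, ZFS_PROP_USERUSED, example_userspace_cb, NULL); */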
struct holdarg {
nvlist_t *nvl;
const char *snapname;
const char *tag;
boolean_t recursive;
int error;
};
static int
zfs_hold_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
fnvlist_add_string(ha->nvl, name, ha->tag);
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_hold_one, ha);
zfs_close(zhp);
return (rv);
}
int
zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive, int cleanup_fd)
{
int ret;
struct holdarg ha;
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
(void) zfs_hold_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
char errbuf[1024];
fnvlist_free(ha.nvl);
ret = ENOENT;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s@%s'"),
zhp->zfs_name, snapname);
(void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf);
return (ret);
}
ret = zfs_hold_nvl(zhp, cleanup_fd, ha.nvl);
fnvlist_free(ha.nvl);
return (ret);
}
int
zfs_hold_nvl(zfs_handle_t *zhp, int cleanup_fd, nvlist_t *holds)
{
int ret;
nvlist_t *errors;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
nvpair_t *elem;
errors = NULL;
ret = lzc_hold(holds, cleanup_fd, &errors);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot hold"));
switch (ret) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s'"), nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case E2BIG:
/*
* Temporary tags wind up having the ds object id
* prepended. So even if we passed the length check
* above, it's still possible for the tag to wind
* up being slightly too long.
*/
(void) zfs_error(hdl, EZFS_TAGTOOLONG, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case EEXIST:
(void) zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
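/*
 * A minimal usage sketch for zfs_hold_nvl() (snapshot name and tag are
 * hypothetical): the holds nvlist maps full snapshot names to hold tags.
 * Passing -1 for cleanup_fd makes the hold persistent rather than tied to
 * the lifetime of a cleanup file descriptor.
 */
static int
example_hold(zfs_handle_t *zhp)
{
	nvlist_t *holds = fnvlist_alloc();
	int err;

	fnvlist_add_string(holds, "tank/home@backup", "offsite-copy");
	err = zfs_hold_nvl(zhp, -1, holds);
	fnvlist_free(holds);
	return (err);
}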
static int
zfs_release_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
nvlist_t *existing_holds;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name)) {
ha->error = EINVAL;
rv = EINVAL;
}
if (lzc_get_holds(name, &existing_holds) != 0) {
ha->error = ENOENT;
} else if (!nvlist_exists(existing_holds, ha->tag)) {
ha->error = ESRCH;
} else {
nvlist_t *torelease = fnvlist_alloc();
fnvlist_add_boolean(torelease, ha->tag);
fnvlist_add_nvlist(ha->nvl, name, torelease);
fnvlist_free(torelease);
}
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_release_one, ha);
zfs_close(zhp);
return (rv);
}
int
zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive)
{
int ret;
struct holdarg ha;
nvlist_t *errors = NULL;
nvpair_t *elem;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
ha.error = 0;
(void) zfs_release_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
fnvlist_free(ha.nvl);
ret = ha.error;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s@%s'"),
zhp->zfs_name, snapname);
if (ret == ESRCH) {
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
} else {
(void) zfs_standard_error(hdl, ret, errbuf);
}
return (ret);
}
ret = lzc_release(ha.nvl, &errors);
fnvlist_free(ha.nvl);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot release"));
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zfs_standard_error(hdl, errno, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s'"),
nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case ESRCH:
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
int
zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
int nvsz = 2048;
void *nvbuf;
int err = 0;
char errbuf[1024];
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
tryagain:
nvbuf = malloc(nvsz);
if (nvbuf == NULL) {
err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno)));
goto out;
}
zc.zc_nvlist_dst_size = nvsz;
zc.zc_nvlist_dst = (uintptr_t)nvbuf;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_GET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOMEM:
free(nvbuf);
nvsz = zc.zc_nvlist_dst_size;
goto tryagain;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
} else {
/* success */
int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
if (rc) {
err = zfs_standard_error_fmt(hdl, rc, dgettext(
TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
}
}
free(nvbuf);
out:
return (err);
}
int
zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *nvbuf;
char errbuf[1024];
size_t nvsz;
int err;
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE);
assert(err == 0);
nvbuf = malloc(nvsz);
err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0);
assert(err == 0);
zc.zc_nvlist_src_size = nvsz;
zc.zc_nvlist_src = (uintptr_t)nvbuf;
zc.zc_perm_action = un;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
free(nvbuf);
return (err);
}
int
zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
{
int err;
char errbuf[1024];
err = lzc_get_holds(zhp->zfs_name, nvl);
if (err != 0) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
zhp->zfs_name);
switch (err) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
return (err);
}
/*
* The theory of raidz space accounting
*
* The "referenced" property of RAIDZ vdevs is scaled such that a 128KB block
* will "reference" 128KB, even though it allocates more than that, to store the
* parity information (and perhaps skip sectors). This concept of the
* "referenced" (and other DMU space accounting) being lower than the allocated
* space by a constant factor is called "raidz deflation."
*
* As mentioned above, the constant factor for raidz deflation assumes a 128KB
* block size. However, zvols typically have a much smaller block size (default
* 8KB). These smaller blocks may require proportionally much more parity
* information (and perhaps skip sectors). In this case, the change to the
* "referenced" property may be much more than the logical block size.
*
* Suppose a raidz vdev has 5 disks with ashift=12. A 128k block may be written
* as follows.
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D8 | D16 | D24 |
* | P1 | D1 | D9 | D17 | D25 |
* | P2 | D2 | D10 | D18 | D26 |
* | P3 | D3 | D11 | D19 | D27 |
* | P4 | D4 | D12 | D20 | D28 |
* | P5 | D5 | D13 | D21 | D29 |
* | P6 | D6 | D14 | D22 | D30 |
* | P7 | D7 | D15 | D23 | D31 |
* +-------+-------+-------+-------+-------+
*
* Above, notice that 160k was allocated: 8 x 4k parity sectors + 32 x 4k data
* sectors. The dataset's referenced will increase by 128k and the pool's
* allocated and free properties will be adjusted by 160k.
*
* A 4k block written to the same raidz vdev will require two 4k sectors. The
* blank cells represent unallocated space.
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | | | |
* +-------+-------+-------+-------+-------+
*
* Above, notice that the 4k block required one sector for parity and another
* for data. vdev_raidz_asize() will return 8k and as such the pool's allocated
* and free properties will be adjusted by 8k. The dataset will not be charged
* 8k. Rather, it will be charged a value that is scaled according to the
* overhead of the 128k block on the same vdev. This 8k allocation will be
* charged 8k * 128k / 160k. 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as
* calculated in the 128k block example above.
*
* Every raidz allocation is sized to be a multiple of nparity+1 sectors. That
* is, every raidz1 allocation will be a multiple of 2 sectors, raidz2
* allocations are a multiple of 3 sectors, and raidz3 allocations are a
* multiple of 4 sectors. When a block does not fill the required number of
* sectors, skip blocks (sectors) are used.
*
* An 8k block being written to a raidz vdev may be written as follows:
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D1 | S0 | |
* +-------+-------+-------+-------+-------+
*
* In order to maintain the nparity+1 allocation size, a skip block (S0) was
* added. For this 8k block, the pool's allocated and free properties are
* adjusted by 16k and the dataset's referenced is increased by 16k * 128k /
* 160k. Again, 128k is from SPA_OLD_MAXBLOCKSIZE and 160k is as calculated in
* the 128k block example above.
*
* The situation is slightly different for dRAID since the minimum allocation
* size is the full group width. The same 8K block above would be written as
* follows in a dRAID group:
*
* +-------+-------+-------+-------+-------+
* | disk1 | disk2 | disk3 | disk4 | disk5 |
* +-------+-------+-------+-------+-------+
* | P0 | D0 | D1 | S0 | S1 |
* +-------+-------+-------+-------+-------+
*
* Compression may lead to a variety of block sizes being written for the same
* volume or file. There is no clear way to reserve just the amount of space
* that will be required, so the worst case (no compression) is assumed.
* Note that metadata blocks will typically be compressed, so the reservation
* size returned by zvol_volsize_to_reservation() will generally be slightly
* larger than the maximum that the volume can reference.
*/
/*
* Derived from function of same name in module/zfs/vdev_raidz.c. Returns the
* amount of space (in bytes) that will be allocated for the specified block
* size. Note that the "referenced" space accounted will be less than this, but
* not necessarily equal to "blksize", due to RAIDZ deflation.
*/
static uint64_t
vdev_raidz_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
uint64_t blksize)
{
uint64_t asize, ndata;
ASSERT3U(ndisks, >, nparity);
ndata = ndisks - nparity;
asize = ((blksize - 1) >> ashift) + 1;
asize += nparity * ((asize + ndata - 1) / ndata);
asize = roundup(asize, nparity + 1) << ashift;
return (asize);
}
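/*
 * Worked numbers for the 5-wide raidz1 (ashift=12) layout used in the
 * theory comment above, as computed by vdev_raidz_asize():
 *
 *   blksize = 128K: 32 data sectors + 8 parity sectors = 40 sectors,
 *                   already a multiple of nparity+1, so 40 << 12 = 160K.
 *   blksize = 4K:   1 data sector + 1 parity sector = 2 sectors = 8K.
 *   blksize = 8K:   2 data sectors + 1 parity sector = 3 sectors, rounded
 *                   up to 4 (one skip sector), so 16K.
 */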
/*
* Derived from function of same name in module/zfs/vdev_draid.c. Returns the
* amount of space (in bytes) that will be allocated for the specified block
* size.
*/
static uint64_t
vdev_draid_asize(uint64_t ndisks, uint64_t nparity, uint64_t ashift,
uint64_t blksize)
{
ASSERT3U(ndisks, >, nparity);
uint64_t ndata = ndisks - nparity;
uint64_t rows = ((blksize - 1) / (ndata << ashift)) + 1;
uint64_t asize = (rows * ndisks) << ashift;
return (asize);
}
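/*
 * Worked numbers for a 5-wide single-parity dRAID group with ashift=12,
 * matching the dRAID diagram in the theory comment above, as computed by
 * vdev_draid_asize():
 *
 *   blksize = 8K:   ndata = 4, rows = 1, so asize = 1 * 5 sectors = 20K
 *                   (P0, D0, D1 plus two skip sectors).
 *   blksize = 128K: rows = 8, so asize = 8 * 5 sectors = 160K.
 */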
/*
* Determine how much space will be allocated if it lands on the most space-
* inefficient top-level vdev. Returns the size in bytes required to store one
* copy of the volume data. See theory comment above.
*/
static uint64_t
volsize_from_vdevs(zpool_handle_t *zhp, uint64_t nblocks, uint64_t blksize)
{
nvlist_t *config, *tree, **vdevs;
uint_t nvdevs;
uint64_t ret = 0;
config = zpool_get_config(zhp, NULL);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree) != 0 ||
nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN,
&vdevs, &nvdevs) != 0) {
return (nblocks * blksize);
}
for (int v = 0; v < nvdevs; v++) {
char *type;
uint64_t nparity, ashift, asize, tsize;
uint64_t volsize;
if (nvlist_lookup_string(vdevs[v], ZPOOL_CONFIG_TYPE,
&type) != 0)
continue;
if (strcmp(type, VDEV_TYPE_RAIDZ) != 0 &&
strcmp(type, VDEV_TYPE_DRAID) != 0)
continue;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_NPARITY, &nparity) != 0)
continue;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_ASHIFT, &ashift) != 0)
continue;
if (strcmp(type, VDEV_TYPE_RAIDZ) == 0) {
nvlist_t **disks;
uint_t ndisks;
if (nvlist_lookup_nvlist_array(vdevs[v],
ZPOOL_CONFIG_CHILDREN, &disks, &ndisks) != 0)
continue;
/* allocation size for the "typical" 128k block */
tsize = vdev_raidz_asize(ndisks, nparity, ashift,
SPA_OLD_MAXBLOCKSIZE);
/* allocation size for the blksize block */
asize = vdev_raidz_asize(ndisks, nparity, ashift,
blksize);
} else {
uint64_t ndata;
if (nvlist_lookup_uint64(vdevs[v],
ZPOOL_CONFIG_DRAID_NDATA, &ndata) != 0)
continue;
/* allocation size for the "typical" 128k block */
tsize = vdev_draid_asize(ndata + nparity, nparity,
ashift, SPA_OLD_MAXBLOCKSIZE);
/* allocation size for the blksize block */
asize = vdev_draid_asize(ndata + nparity, nparity,
ashift, blksize);
}
/*
* Scale this size down as a ratio of 128k / tsize.
* See theory statement above.
*/
volsize = nblocks * asize * SPA_OLD_MAXBLOCKSIZE / tsize;
if (volsize > ret) {
ret = volsize;
}
}
if (ret == 0) {
ret = nblocks * blksize;
}
return (ret);
}
/*
* Convert the zvol's volume size to an appropriate reservation. See theory
* comment above.
*
* Note: If this routine is updated, it is necessary to update the ZFS test
* suite's shell version in reservation.shlib.
*/
uint64_t
zvol_volsize_to_reservation(zpool_handle_t *zph, uint64_t volsize,
nvlist_t *props)
{
uint64_t numdb;
uint64_t nblocks, volblocksize;
int ncopies;
char *strval;
if (nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0)
ncopies = atoi(strval);
else
ncopies = 1;
if (nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&volblocksize) != 0)
volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
nblocks = volsize / volblocksize;
/*
* Metadata defaults to using 128k blocks, not volblocksize blocks. For
* this reason, only the data blocks are scaled based on vdev config.
*/
volsize = volsize_from_vdevs(zph, nblocks, volblocksize);
/* start with metadnode L0-L6 */
numdb = 7;
/* calculate number of indirects */
while (nblocks > 1) {
nblocks += DNODES_PER_LEVEL - 1;
nblocks /= DNODES_PER_LEVEL;
numdb += nblocks;
}
numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1);
volsize *= ncopies;
/*
* each indirect block is exactly 1 << DN_MAX_INDBLKSHIFT bytes when
* metadata isn't compressed, but in practice they compress down to
* about 1100 bytes
*/
numdb *= 1ULL << DN_MAX_INDBLKSHIFT;
volsize += numdb;
return (volsize);
}
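/*
 * Worked example for a hypothetical 1 GiB volume with the default 8K
 * volblocksize and copies=1 on the 5-wide raidz1 (ashift=12) vdev from the
 * theory comment, assuming 128K indirect blocks holding 1024 block pointers
 * each (DNODES_PER_LEVEL == 1024):
 *
 *   nblocks = 1 GiB / 8K = 131072. Each 8K block allocates 16K on that
 *   vdev, so volsize_from_vdevs() scales the data to roughly
 *   1 GiB * (16K / 8K) * (128K / 160K) ~= 1.6 GiB.
 *
 *   The indirect tree adds 128 + 1 blocks on top of the 7 metadnode levels,
 *   so numdb = 136; doubled by MIN(SPA_DVAS_PER_BP, ncopies + 1) and
 *   multiplied by the 128K worst-case indirect block size, that is about
 *   34 MiB of metadata, for a reservation of roughly 1.63 GiB.
 */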
/*
* Wait for the given activity and return the status of the wait (whether or not
* any waiting was done) in the 'waited' parameter. Non-existent fses are
* reported via the 'missing' parameter, rather than by printing an error
* message. This is convenient when this function is called in a loop over a
* long period of time (as it is, for example, by zfs's wait cmd). In that
* scenario, a fs being exported or destroyed should be considered a normal
* event, so we don't want to print an error when we find that the fs doesn't
* exist.
*/
int
zfs_wait_status(zfs_handle_t *zhp, zfs_wait_activity_t activity,
boolean_t *missing, boolean_t *waited)
{
int error = lzc_wait_fs(zhp->zfs_name, activity, waited);
*missing = (error == ENOENT);
if (*missing)
return (0);
if (error != 0) {
(void) zfs_standard_error_fmt(zhp->zfs_hdl, error,
dgettext(TEXT_DOMAIN, "error waiting in fs '%s'"),
zhp->zfs_name);
}
return (error);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
index 5729f120e9df..b0279d8fbc3f 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_mount.c
@@ -1,1651 +1,1655 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017 RackTop Systems.
* Copyright (c) 2018 Datto Inc.
* Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
/*
* Routines to manage ZFS mounts. We separate all the nasty routines that have
* to deal with the OS. The following functions are the main entry points --
* they are used by mount and unmount and when changing a filesystem's
* mountpoint.
*
* zfs_is_mounted()
* zfs_mount()
* zfs_mount_at()
* zfs_unmount()
* zfs_unmountall()
*
* This file also contains the functions used to manage sharing filesystems via
* NFS and SMB:
*
* zfs_is_shared()
* zfs_share()
* zfs_unshare()
*
* zfs_is_shared_nfs()
* zfs_is_shared_smb()
* zfs_share_proto()
* zfs_shareall();
* zfs_unshare_nfs()
* zfs_unshare_smb()
* zfs_unshareall_nfs()
* zfs_unshareall_smb()
* zfs_unshareall()
* zfs_unshareall_bypath()
*
* The following functions are available for pool consumers, and will
* mount/unmount and share/unshare all datasets within pool:
*
* zpool_enable_datasets()
* zpool_disable_datasets()
*/
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include "libzfs_impl.h"
#include <thread_pool.h>
#include <libshare.h>
#include <sys/systeminfo.h>
#define MAXISALEN 257 /* based on sysinfo(2) man page */
static int mount_tp_nthr = 512; /* tpool threads for multi-threaded mounting */
static void zfs_mount_task(void *);
static zfs_share_type_t zfs_is_shared_proto(zfs_handle_t *, char **,
zfs_share_proto_t);
/*
* The share protocols table must be in the same order as the zfs_share_proto_t
* enum in libzfs_impl.h
*/
proto_table_t proto_table[PROTO_END] = {
{ZFS_PROP_SHARENFS, "nfs", EZFS_SHARENFSFAILED, EZFS_UNSHARENFSFAILED},
{ZFS_PROP_SHARESMB, "smb", EZFS_SHARESMBFAILED, EZFS_UNSHARESMBFAILED},
};
static zfs_share_proto_t nfs_only[] = {
PROTO_NFS,
PROTO_END
};
static zfs_share_proto_t smb_only[] = {
PROTO_SMB,
PROTO_END
};
static zfs_share_proto_t share_all_proto[] = {
PROTO_NFS,
PROTO_SMB,
PROTO_END
};
static boolean_t
dir_is_empty_stat(const char *dirname)
{
struct stat st;
/*
* We only want to return false if the given path is a non-empty
* directory; all other errors are handled elsewhere.
*/
if (stat(dirname, &st) < 0 || !S_ISDIR(st.st_mode)) {
return (B_TRUE);
}
/*
* An empty directory will still have two entries in it, one
* entry for each of "." and "..".
*/
if (st.st_size > 2) {
return (B_FALSE);
}
return (B_TRUE);
}
static boolean_t
dir_is_empty_readdir(const char *dirname)
{
DIR *dirp;
struct dirent64 *dp;
int dirfd;
if ((dirfd = openat(AT_FDCWD, dirname,
O_RDONLY | O_NDELAY | O_LARGEFILE | O_CLOEXEC, 0)) < 0) {
return (B_TRUE);
}
if ((dirp = fdopendir(dirfd)) == NULL) {
(void) close(dirfd);
return (B_TRUE);
}
while ((dp = readdir64(dirp)) != NULL) {
if (strcmp(dp->d_name, ".") == 0 ||
strcmp(dp->d_name, "..") == 0)
continue;
(void) closedir(dirp);
return (B_FALSE);
}
(void) closedir(dirp);
return (B_TRUE);
}
/*
* Returns true if the specified directory is empty. If we can't open the
* directory at all, return true so that the mount can fail with a more
* informative error message.
*/
static boolean_t
dir_is_empty(const char *dirname)
{
struct statfs64 st;
/*
* If the statfs call fails or the filesystem is not a ZFS
* filesystem, fall back to the slow path which uses readdir.
*/
if ((statfs64(dirname, &st) != 0) ||
(st.f_type != ZFS_SUPER_MAGIC)) {
return (dir_is_empty_readdir(dirname));
}
/*
* At this point, we know the provided path is on a ZFS
* filesystem, so we can use stat instead of readdir to
* determine if the directory is empty or not. We try to avoid
* using readdir because that requires opening "dirname"; this
* open file descriptor can potentially end up in a child
* process if there's a concurrent fork, thus preventing the
* zfs_mount() from otherwise succeeding (the open file
* descriptor inherited by the child process will cause the
* parent's mount to fail with EBUSY). The performance benefit
* of replacing the open, read, and close with a single stat is
* nice, but it is not the main motivation for the added
* complexity.
*/
return (dir_is_empty_stat(dirname));
}
/*
* Checks to see if the mount is active. If the filesystem is mounted, we fill
* in 'where' with the current mountpoint, and return 1. Otherwise, we return
* 0.
*/
boolean_t
is_mounted(libzfs_handle_t *zfs_hdl, const char *special, char **where)
{
struct mnttab entry;
if (libzfs_mnttab_find(zfs_hdl, special, &entry) != 0)
return (B_FALSE);
if (where != NULL)
*where = zfs_strdup(zfs_hdl, entry.mnt_mountp);
return (B_TRUE);
}
boolean_t
zfs_is_mounted(zfs_handle_t *zhp, char **where)
{
return (is_mounted(zhp->zfs_hdl, zfs_get_name(zhp), where));
}
/*
* Checks any higher-order concerns about whether the given dataset is
* mountable, returning B_FALSE if it is not. zfs_is_mountable_internal()
* specifically assumes that the caller has already verified the sanity of
* mounting the dataset at the given mountpoint to the extent the caller wants.
*/
static boolean_t
zfs_is_mountable_internal(zfs_handle_t *zhp, const char *mountpoint)
{
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED) &&
getzoneid() == GLOBAL_ZONEID)
return (B_FALSE);
return (B_TRUE);
}
/*
* Returns true if the given dataset is mountable, false otherwise. Returns the
* mountpoint in 'buf'.
*/
boolean_t
zfs_is_mountable(zfs_handle_t *zhp, char *buf, size_t buflen,
zprop_source_t *source, int flags)
{
char sourceloc[MAXNAMELEN];
zprop_source_t sourcetype;
if (!zfs_prop_valid_for_type(ZFS_PROP_MOUNTPOINT, zhp->zfs_type,
B_FALSE))
return (B_FALSE);
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, buf, buflen,
&sourcetype, sourceloc, sizeof (sourceloc), B_FALSE) == 0);
if (strcmp(buf, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(buf, ZFS_MOUNTPOINT_LEGACY) == 0)
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_OFF)
return (B_FALSE);
if (!zfs_is_mountable_internal(zhp, buf))
return (B_FALSE);
if (zfs_prop_get_int(zhp, ZFS_PROP_REDACTED) && !(flags & MS_FORCE))
return (B_FALSE);
if (source)
*source = sourcetype;
return (B_TRUE);
}
/*
* The filesystem is mounted by invoking the system mount utility rather
* than by the system call mount(2). This ensures that the /etc/mtab
* file is correctly locked for the update. Performing our own locking
* and /etc/mtab update requires making an unsafe assumption about how
* the mount utility performs its locking. Unfortunately, this also means
* in the case of a mount failure we do not have the exact errno. We must
* make do with the return value from the mount process.
*
* In the long term a shared library called libmount is under development
* which provides a common API to address the locking and errno issues.
* Once the standard mount utility has been updated to use this library
* we can add an autoconf check to conditionally use it.
*
* http://www.kernel.org/pub/linux/utils/util-linux/libmount-docs/index.html
*/
static int
zfs_add_option(zfs_handle_t *zhp, char *options, int len,
zfs_prop_t prop, char *on, char *off)
{
char *source;
uint64_t value;
/* Skip adding duplicate default options */
if ((strstr(options, on) != NULL) || (strstr(options, off) != NULL))
return (0);
/*
* zfs_prop_get_int() is not used to ensure our mount options
* are not influenced by the current /proc/self/mounts contents.
*/
value = getprop_uint64(zhp, prop, &source);
(void) strlcat(options, ",", len);
(void) strlcat(options, value ? on : off, len);
return (0);
}
static int
zfs_add_options(zfs_handle_t *zhp, char *options, int len)
{
int error = 0;
error = zfs_add_option(zhp, options, len,
ZFS_PROP_ATIME, MNTOPT_ATIME, MNTOPT_NOATIME);
/*
* don't add relatime/strictatime when atime=off, otherwise strictatime
* will force atime=on
*/
if (strstr(options, MNTOPT_NOATIME) == NULL) {
error = zfs_add_option(zhp, options, len,
ZFS_PROP_RELATIME, MNTOPT_RELATIME, MNTOPT_STRICTATIME);
}
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_DEVICES, MNTOPT_DEVICES, MNTOPT_NODEVICES);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_EXEC, MNTOPT_EXEC, MNTOPT_NOEXEC);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_READONLY, MNTOPT_RO, MNTOPT_RW);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_SETUID, MNTOPT_SETUID, MNTOPT_NOSETUID);
error = error ? error : zfs_add_option(zhp, options, len,
ZFS_PROP_NBMAND, MNTOPT_NBMAND, MNTOPT_NONBMAND);
return (error);
}
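/*
 * Illustrative trace (property values are hypothetical): for a dataset with
 * atime=off and the remaining properties at their defaults, and an option
 * string that does not already contain any of these names, the calls above
 * append, in order, MNTOPT_NOATIME, MNTOPT_DEVICES, MNTOPT_EXEC, MNTOPT_RW,
 * MNTOPT_SETUID and MNTOPT_NONBMAND; the relatime/strictatime pair is
 * skipped because MNTOPT_NOATIME is already present in the options.
 */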
int
zfs_mount(zfs_handle_t *zhp, const char *options, int flags)
{
char mountpoint[ZFS_MAXPROPLEN];
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL,
flags))
return (0);
return (zfs_mount_at(zhp, options, flags, mountpoint));
}
/*
* Mount the given filesystem.
*/
int
zfs_mount_at(zfs_handle_t *zhp, const char *options, int flags,
const char *mountpoint)
{
struct stat buf;
char mntopts[MNT_LINE_MAX];
char overlay[ZFS_MAXPROPLEN];
char prop_encroot[MAXNAMELEN];
boolean_t is_encroot;
zfs_handle_t *encroot_hp = zhp;
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t keystatus;
int remount = 0, rc;
if (options == NULL) {
(void) strlcpy(mntopts, MNTOPT_DEFAULTS, sizeof (mntopts));
} else {
(void) strlcpy(mntopts, options, sizeof (mntopts));
}
if (strstr(mntopts, MNTOPT_REMOUNT) != NULL)
remount = 1;
/* Potentially duplicates some checks if invoked by zfs_mount(). */
if (!zfs_is_mountable_internal(zhp, mountpoint))
return (0);
/*
* If the pool is imported read-only then all mounts must be read-only
*/
if (zpool_get_prop_int(zhp->zpool_hdl, ZPOOL_PROP_READONLY, NULL))
(void) strlcat(mntopts, "," MNTOPT_RO, sizeof (mntopts));
/*
* Append default mount options which apply to the mount point.
* This is done because under Linux (unlike Solaris) multiple mount
* points may reference a single super block. This means that just
* given a super block there is no back reference to update the per
* mount point options.
*/
rc = zfs_add_options(zhp, mntopts, sizeof (mntopts));
if (rc) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"default options unavailable"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
/*
* If the filesystem is encrypted the key must be loaded in order to
* mount. If the key isn't loaded, the MS_CRYPT flag decides whether
* or not we attempt to load the keys. Note: we must call
* zfs_refresh_properties() here since some callers of this function
* (most notably zpool_enable_datasets()) may implicitly load our key
* by loading the parent's key first.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
zfs_refresh_properties(zhp);
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If the key is unavailable and MS_CRYPT is set give the
* user a chance to enter the key. Otherwise just fail
* immediately.
*/
if (keystatus == ZFS_KEYSTATUS_UNAVAILABLE) {
if (flags & MS_CRYPT) {
rc = zfs_crypto_get_encryption_root(zhp,
&is_encroot, prop_encroot);
if (rc) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Failed to get encryption root for "
"'%s'."), zfs_get_name(zhp));
return (rc);
}
if (!is_encroot) {
encroot_hp = zfs_open(hdl, prop_encroot,
ZFS_TYPE_DATASET);
if (encroot_hp == NULL)
return (hdl->libzfs_error);
}
rc = zfs_crypto_load_key(encroot_hp,
B_FALSE, NULL);
if (!is_encroot)
zfs_close(encroot_hp);
if (rc)
return (rc);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key not loaded"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
}
}
/*
* Append the zfsutil option so the mount helper allows the mount
*/
strlcat(mntopts, "," MNTOPT_ZFSUTIL, sizeof (mntopts));
/* Create the directory if it doesn't already exist */
if (lstat(mountpoint, &buf) != 0) {
if (mkdirp(mountpoint, 0755) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create mountpoint: %s"),
strerror(errno));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
mountpoint));
}
}
/*
* Overlay mounts are enabled by default but may be disabled
* via the 'overlay' property. The -O flag remains for compatibility.
*/
if (!(flags & MS_OVERLAY)) {
if (zfs_prop_get(zhp, ZFS_PROP_OVERLAY, overlay,
sizeof (overlay), NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(overlay, "on") == 0) {
flags |= MS_OVERLAY;
}
}
}
/*
* Determine if the mountpoint is empty. If it is not, refuse to perform
* the mount. We don't perform this check if 'remount' is specified or
* if the overlay option (-O) is given.
*/
if ((flags & MS_OVERLAY) == 0 && !remount &&
!dir_is_empty(mountpoint)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"directory is not empty"));
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"), mountpoint));
}
/* perform the mount */
rc = do_mount(zhp, mountpoint, mntopts, flags);
if (rc) {
/*
* Generic errors are nasty, but there are just way too many
* from mount(), and they're well-understood. We pick a few
* common ones to improve upon.
*/
if (rc == EBUSY) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mountpoint or dataset is busy"));
} else if (rc == EPERM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Insufficient privileges"));
} else if (rc == ENOTSUP) {
int spa_version;
VERIFY(zfs_spa_version(zhp, &spa_version) == 0);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can't mount a version %llu "
"file system on a version %d pool. Pool must be"
" upgraded to mount this file system."),
(u_longlong_t)zfs_prop_get_int(zhp,
ZFS_PROP_VERSION), spa_version);
} else {
zfs_error_aux(hdl, "%s", strerror(rc));
}
return (zfs_error_fmt(hdl, EZFS_MOUNTFAILED,
dgettext(TEXT_DOMAIN, "cannot mount '%s'"),
zhp->zfs_name));
}
/* remove the mounted entry before re-adding on remount */
if (remount)
libzfs_mnttab_remove(hdl, zhp->zfs_name);
/* add the mounted entry into our cache */
libzfs_mnttab_add(hdl, zfs_get_name(zhp), mountpoint, mntopts);
return (0);
}
/*
* Unmount a single filesystem.
*/
static int
unmount_one(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
int error;
error = do_unmount(zhp, mountpoint, flags);
if (error != 0) {
int libzfs_err;
switch (error) {
case EBUSY:
libzfs_err = EZFS_BUSY;
break;
case EIO:
libzfs_err = EZFS_IO;
break;
case ENOENT:
libzfs_err = EZFS_NOENT;
break;
case ENOMEM:
libzfs_err = EZFS_NOMEM;
break;
case EPERM:
libzfs_err = EZFS_PERM;
break;
default:
libzfs_err = EZFS_UMOUNTFAILED;
}
return (zfs_error_fmt(zhp->zfs_hdl, libzfs_err,
dgettext(TEXT_DOMAIN, "cannot unmount '%s'"),
mountpoint));
}
return (0);
}
/*
* Unmount the given filesystem.
*/
int
zfs_unmount(zfs_handle_t *zhp, const char *mountpoint, int flags)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
char *mntpt = NULL;
boolean_t encroot, unmounted = B_FALSE;
/* check to see if we need to unmount the filesystem */
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0)) {
/*
 * If mountpoint isn't NULL, it may have come from a call to
 * getmnt/getmntany. If it is NULL, we know it comes from
 * libzfs_mnttab_find, whose entry may be freed later, so we
 * strdup it to be safe.
 */
if (mountpoint == NULL)
mntpt = zfs_strdup(hdl, entry.mnt_mountp);
else
mntpt = zfs_strdup(hdl, mountpoint);
/*
* Unshare and unmount the filesystem
*/
if (zfs_unshare_proto(zhp, mntpt, share_all_proto) != 0) {
free(mntpt);
return (-1);
}
zfs_commit_all_shares();
if (unmount_one(zhp, mntpt, flags) != 0) {
free(mntpt);
(void) zfs_shareall(zhp);
zfs_commit_all_shares();
return (-1);
}
libzfs_mnttab_remove(hdl, zhp->zfs_name);
free(mntpt);
unmounted = B_TRUE;
}
/*
 * If the MS_CRYPT flag is provided, we must ensure we attempt to
 * unload the dataset's key regardless of whether we did any work
 * to unmount it. We only do this for encryption roots.
 */
if ((flags & MS_CRYPT) != 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
zfs_refresh_properties(zhp);
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0 &&
unmounted) {
(void) zfs_mount(zhp, NULL, 0);
return (-1);
}
if (encroot && zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_AVAILABLE &&
zfs_crypto_unload_key(zhp) != 0) {
(void) zfs_mount(zhp, NULL, 0);
return (-1);
}
}
+ zpool_disable_volume_os(zhp->zfs_name);
+
return (0);
}
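/*
 * Illustrative sketch (assumption, not part of this change): unmounting
 * with MS_CRYPT, as described above, also unloads the key when the handle
 * refers to an encryption root.
 *
 *	if (zfs_unmount(zhp, NULL, MS_CRYPT) != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zfs_get_handle(zhp)));
 */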
/*
* Unmount this filesystem and any children inheriting the mountpoint property.
* To do this, just act like we're changing the mountpoint property, but don't
* remount the filesystems afterwards.
*/
int
zfs_unmountall(zfs_handle_t *zhp, int flags)
{
prop_changelist_t *clp;
int ret;
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_ITER_MOUNTED, flags);
if (clp == NULL)
return (-1);
ret = changelist_prefix(clp);
changelist_free(clp);
return (ret);
}
boolean_t
zfs_is_shared(zfs_handle_t *zhp)
{
zfs_share_type_t rc = 0;
zfs_share_proto_t *curr_proto;
if (ZFS_IS_VOLUME(zhp))
return (B_FALSE);
for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
curr_proto++)
rc |= zfs_is_shared_proto(zhp, NULL, *curr_proto);
return (rc ? B_TRUE : B_FALSE);
}
/*
* Unshare a filesystem by mountpoint.
*/
int
unshare_one(libzfs_handle_t *hdl, const char *name, const char *mountpoint,
zfs_share_proto_t proto)
{
int err;
err = sa_disable_share(mountpoint, proto_table[proto].p_name);
if (err != SA_OK) {
return (zfs_error_fmt(hdl, proto_table[proto].p_unshare_err,
dgettext(TEXT_DOMAIN, "cannot unshare '%s': %s"),
name, sa_errorstr(err)));
}
return (0);
}
/*
* Query libshare for the given mountpoint and protocol, returning
* a zfs_share_type_t value.
*/
zfs_share_type_t
is_shared(const char *mountpoint, zfs_share_proto_t proto)
{
if (sa_is_shared(mountpoint, proto_table[proto].p_name)) {
switch (proto) {
case PROTO_NFS:
return (SHARED_NFS);
case PROTO_SMB:
return (SHARED_SMB);
default:
return (SHARED_NOT_SHARED);
}
}
return (SHARED_NOT_SHARED);
}
/*
* Share the given filesystem according to the options in the specified
* protocol specific properties (sharenfs, sharesmb). We rely
* on "libshare" to do the dirty work for us.
*/
int
zfs_share_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char sourcestr[ZFS_MAXPROPLEN];
zfs_share_proto_t *curr_proto;
zprop_source_t sourcetype;
int err = 0;
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint), NULL, 0))
return (0);
for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
/*
* Return success if there are no share options.
*/
if (zfs_prop_get(zhp, proto_table[*curr_proto].p_prop,
shareopts, sizeof (shareopts), &sourcetype, sourcestr,
ZFS_MAXPROPLEN, B_FALSE) != 0 ||
strcmp(shareopts, "off") == 0)
continue;
/*
* If the 'zoned' property is set, then zfs_is_mountable()
* will have already bailed out if we are in the global zone.
* But local zones cannot be NFS servers, so we ignore it for
* local zones as well.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_ZONED))
continue;
err = sa_enable_share(zfs_get_name(zhp), mountpoint, shareopts,
proto_table[*curr_proto].p_name);
if (err != SA_OK) {
return (zfs_error_fmt(zhp->zfs_hdl,
proto_table[*curr_proto].p_share_err,
dgettext(TEXT_DOMAIN, "cannot share '%s: %s'"),
zfs_get_name(zhp), sa_errorstr(err)));
}
}
return (0);
}
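/*
 * Illustrative sketch (assumption, not part of this change): sharing a
 * dataset over NFS and committing the change, since libshare batches
 * share updates until an explicit commit (see zfs_commit_nfs_shares()
 * below).
 *
 *	if (zfs_share_nfs(zhp) == 0)
 *		zfs_commit_nfs_shares();
 */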
int
zfs_share(zfs_handle_t *zhp)
{
assert(!ZFS_IS_VOLUME(zhp));
return (zfs_share_proto(zhp, share_all_proto));
}
int
zfs_unshare(zfs_handle_t *zhp)
{
assert(!ZFS_IS_VOLUME(zhp));
return (zfs_unshareall(zhp));
}
/*
* Check to see if the filesystem is currently shared.
*/
static zfs_share_type_t
zfs_is_shared_proto(zfs_handle_t *zhp, char **where, zfs_share_proto_t proto)
{
char *mountpoint;
zfs_share_type_t rc;
if (!zfs_is_mounted(zhp, &mountpoint))
return (SHARED_NOT_SHARED);
if ((rc = is_shared(mountpoint, proto))
!= SHARED_NOT_SHARED) {
if (where != NULL)
*where = mountpoint;
else
free(mountpoint);
return (rc);
} else {
free(mountpoint);
return (SHARED_NOT_SHARED);
}
}
boolean_t
zfs_is_shared_nfs(zfs_handle_t *zhp, char **where)
{
return (zfs_is_shared_proto(zhp, where,
PROTO_NFS) != SHARED_NOT_SHARED);
}
boolean_t
zfs_is_shared_smb(zfs_handle_t *zhp, char **where)
{
return (zfs_is_shared_proto(zhp, where,
PROTO_SMB) != SHARED_NOT_SHARED);
}
/*
* zfs_parse_options(options, proto)
*
* Validate the given share options against the specified protocol
* by calling into libshare.
*/
int
zfs_parse_options(char *options, zfs_share_proto_t proto)
{
return (sa_validate_shareopts(options, proto_table[proto].p_name));
}
void
zfs_commit_proto(zfs_share_proto_t *proto)
{
zfs_share_proto_t *curr_proto;
for (curr_proto = proto; *curr_proto != PROTO_END; curr_proto++) {
sa_commit_shares(proto_table[*curr_proto].p_name);
}
}
void
zfs_commit_nfs_shares(void)
{
zfs_commit_proto(nfs_only);
}
void
zfs_commit_smb_shares(void)
{
zfs_commit_proto(smb_only);
}
void
zfs_commit_all_shares(void)
{
zfs_commit_proto(share_all_proto);
}
void
zfs_commit_shares(const char *proto)
{
if (proto == NULL)
zfs_commit_proto(share_all_proto);
else if (strcmp(proto, "nfs") == 0)
zfs_commit_proto(nfs_only);
else if (strcmp(proto, "smb") == 0)
zfs_commit_proto(smb_only);
}
int
zfs_share_nfs(zfs_handle_t *zhp)
{
return (zfs_share_proto(zhp, nfs_only));
}
int
zfs_share_smb(zfs_handle_t *zhp)
{
return (zfs_share_proto(zhp, smb_only));
}
int
zfs_shareall(zfs_handle_t *zhp)
{
return (zfs_share_proto(zhp, share_all_proto));
}
/*
* Unshare the given filesystem.
*/
int
zfs_unshare_proto(zfs_handle_t *zhp, const char *mountpoint,
zfs_share_proto_t *proto)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
char *mntpt = NULL;
/* check to see if we need to unshare the filesystem */
if (mountpoint != NULL)
mntpt = zfs_strdup(hdl, mountpoint);
if (mountpoint != NULL || ((zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) &&
libzfs_mnttab_find(hdl, zfs_get_name(zhp), &entry) == 0)) {
zfs_share_proto_t *curr_proto;
if (mountpoint == NULL)
mntpt = zfs_strdup(zhp->zfs_hdl, entry.mnt_mountp);
for (curr_proto = proto; *curr_proto != PROTO_END;
curr_proto++) {
if (is_shared(mntpt, *curr_proto)) {
if (unshare_one(hdl, zhp->zfs_name,
mntpt, *curr_proto) != 0) {
if (mntpt != NULL)
free(mntpt);
return (-1);
}
}
}
}
if (mntpt != NULL)
free(mntpt);
return (0);
}
int
zfs_unshare_nfs(zfs_handle_t *zhp, const char *mountpoint)
{
return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
}
int
zfs_unshare_smb(zfs_handle_t *zhp, const char *mountpoint)
{
return (zfs_unshare_proto(zhp, mountpoint, smb_only));
}
/*
* Same as zfs_unmountall(), but for NFS and SMB unshares.
*/
static int
zfs_unshareall_proto(zfs_handle_t *zhp, zfs_share_proto_t *proto)
{
prop_changelist_t *clp;
int ret;
clp = changelist_gather(zhp, ZFS_PROP_SHARENFS, 0, 0);
if (clp == NULL)
return (-1);
ret = changelist_unshare(clp, proto);
changelist_free(clp);
return (ret);
}
int
zfs_unshareall_nfs(zfs_handle_t *zhp)
{
return (zfs_unshareall_proto(zhp, nfs_only));
}
int
zfs_unshareall_smb(zfs_handle_t *zhp)
{
return (zfs_unshareall_proto(zhp, smb_only));
}
int
zfs_unshareall(zfs_handle_t *zhp)
{
return (zfs_unshareall_proto(zhp, share_all_proto));
}
int
zfs_unshareall_bypath(zfs_handle_t *zhp, const char *mountpoint)
{
return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
}
int
zfs_unshareall_bytype(zfs_handle_t *zhp, const char *mountpoint,
const char *proto)
{
if (proto == NULL)
return (zfs_unshare_proto(zhp, mountpoint, share_all_proto));
if (strcmp(proto, "nfs") == 0)
return (zfs_unshare_proto(zhp, mountpoint, nfs_only));
else if (strcmp(proto, "smb") == 0)
return (zfs_unshare_proto(zhp, mountpoint, smb_only));
else
return (1);
}
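/*
 * Illustrative sketch (assumption, not part of this change): unsharing a
 * dataset's mountpoint for one protocol chosen at run time; passing NULL
 * for the protocol unshares all protocols. The mountpoint is hypothetical.
 *
 *	(void) zfs_unshareall_bytype(zhp, "/tank/home", "nfs");
 */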
/*
* Remove the mountpoint associated with the current dataset, if necessary.
* We only remove the underlying directory if:
*
* - The mountpoint is not 'none' or 'legacy'
* - The mountpoint is non-empty
* - The mountpoint is the default or inherited
* - The 'zoned' property is set, or we're in a local zone
*
* Any other directories we leave alone.
*/
void
remove_mountpoint(zfs_handle_t *zhp)
{
char mountpoint[ZFS_MAXPROPLEN];
zprop_source_t source;
if (!zfs_is_mountable(zhp, mountpoint, sizeof (mountpoint),
&source, 0))
return;
if (source == ZPROP_SRC_DEFAULT ||
source == ZPROP_SRC_INHERITED) {
/*
* Try to remove the directory, silently ignoring any errors.
* The filesystem may have since been removed or moved around,
* and this error isn't really useful to the administrator in
* any way.
*/
(void) rmdir(mountpoint);
}
}
/*
* Add the given zfs handle to the cb_handles array, dynamically reallocating
* the array if it is out of space.
*/
void
libzfs_add_handle(get_all_cb_t *cbp, zfs_handle_t *zhp)
{
if (cbp->cb_alloc == cbp->cb_used) {
size_t newsz;
zfs_handle_t **newhandles;
newsz = cbp->cb_alloc != 0 ? cbp->cb_alloc * 2 : 64;
newhandles = zfs_realloc(zhp->zfs_hdl,
cbp->cb_handles, cbp->cb_alloc * sizeof (zfs_handle_t *),
newsz * sizeof (zfs_handle_t *));
cbp->cb_handles = newhandles;
cbp->cb_alloc = newsz;
}
cbp->cb_handles[cbp->cb_used++] = zhp;
}
/*
* Recursive helper function used during file system enumeration
*/
static int
zfs_iter_cb(zfs_handle_t *zhp, void *data)
{
get_all_cb_t *cbp = data;
if (!(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM)) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_NOAUTO) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
zfs_close(zhp);
return (0);
}
/*
 * If this filesystem is inconsistent and has a receive resume
 * token, we cannot mount it.
 */
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(cbp, zhp);
if (zfs_iter_filesystems(zhp, zfs_iter_cb, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
return (0);
}
/*
* Sort comparator that compares two mountpoint paths. We sort these paths so
* that subdirectories immediately follow their parents. This means that we
* effectively treat the '/' character as the lowest value non-nul char.
* Since filesystems from non-global zones can have the same mountpoint
* as other filesystems, the comparator sorts global zone filesystems to
* the top of the list. This means that the global zone will traverse the
* filesystem list in the correct order and can stop when it sees the
* first zoned filesystem. In a non-global zone, only the delegated
* filesystems are seen.
*
* An example sorted list using this comparator would look like:
*
* /foo
* /foo/bar
* /foo/bar/baz
* /foo/baz
* /foo.bar
* /foo (NGZ1)
* /foo (NGZ2)
*
* The mounting code depends on this ordering to deterministically iterate
* over filesystems in order to spawn parallel mount tasks.
*/
static int
mountpoint_cmp(const void *arga, const void *argb)
{
zfs_handle_t *const *zap = arga;
zfs_handle_t *za = *zap;
zfs_handle_t *const *zbp = argb;
zfs_handle_t *zb = *zbp;
char mounta[MAXPATHLEN];
char mountb[MAXPATHLEN];
const char *a = mounta;
const char *b = mountb;
boolean_t gota, gotb;
uint64_t zoneda, zonedb;
zoneda = zfs_prop_get_int(za, ZFS_PROP_ZONED);
zonedb = zfs_prop_get_int(zb, ZFS_PROP_ZONED);
if (zoneda && !zonedb)
return (1);
if (!zoneda && zonedb)
return (-1);
gota = (zfs_get_type(za) == ZFS_TYPE_FILESYSTEM);
if (gota) {
verify(zfs_prop_get(za, ZFS_PROP_MOUNTPOINT, mounta,
sizeof (mounta), NULL, NULL, 0, B_FALSE) == 0);
}
gotb = (zfs_get_type(zb) == ZFS_TYPE_FILESYSTEM);
if (gotb) {
verify(zfs_prop_get(zb, ZFS_PROP_MOUNTPOINT, mountb,
sizeof (mountb), NULL, NULL, 0, B_FALSE) == 0);
}
if (gota && gotb) {
while (*a != '\0' && (*a == *b)) {
a++;
b++;
}
if (*a == *b)
return (0);
if (*a == '\0')
return (-1);
if (*b == '\0')
return (1);
if (*a == '/')
return (-1);
if (*b == '/')
return (1);
return (*a < *b ? -1 : *a > *b);
}
if (gota)
return (-1);
if (gotb)
return (1);
/*
* If neither filesystem has a mountpoint, revert to sorting by
* dataset name.
*/
return (strcmp(zfs_get_name(za), zfs_get_name(zb)));
}
/*
* Return true if path2 is a child of path1 or path2 equals path1 or
* path1 is "/" (path2 is always a child of "/").
*/
static boolean_t
libzfs_path_contains(const char *path1, const char *path2)
{
return (strcmp(path1, path2) == 0 || strcmp(path1, "/") == 0 ||
(strstr(path2, path1) == path2 && path2[strlen(path1)] == '/'));
}
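/*
 * Examples (illustrative, not part of this change):
 *
 *	libzfs_path_contains("/foo", "/foo")     -> B_TRUE  (equal paths)
 *	libzfs_path_contains("/foo", "/foo/bar") -> B_TRUE  (child path)
 *	libzfs_path_contains("/", "/foo")        -> B_TRUE  (root contains all)
 *	libzfs_path_contains("/foo", "/foobar")  -> B_FALSE (not a child)
 */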
/*
* Given a mountpoint specified by idx in the handles array, find the first
* non-descendant of that mountpoint and return its index. Descendant paths
* start with the parent's path. This function relies on the ordering
* enforced by mountpoint_cmp().
*/
static int
non_descendant_idx(zfs_handle_t **handles, size_t num_handles, int idx)
{
char parent[ZFS_MAXPROPLEN];
char child[ZFS_MAXPROPLEN];
int i;
verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, parent,
sizeof (parent), NULL, NULL, 0, B_FALSE) == 0);
for (i = idx + 1; i < num_handles; i++) {
verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT, child,
sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
if (!libzfs_path_contains(parent, child))
break;
}
return (i);
}
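/*
 * Example (illustrative, not part of this change): given the sorted
 * mountpoints /foo, /foo/bar, /foo/bar/baz, /foo/baz, /foo.bar,
 * non_descendant_idx(handles, 5, 0) returns 4, the index of /foo.bar,
 * which is the first entry not underneath /foo.
 */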
typedef struct mnt_param {
libzfs_handle_t *mnt_hdl;
tpool_t *mnt_tp;
zfs_handle_t **mnt_zhps; /* filesystems to mount */
size_t mnt_num_handles;
int mnt_idx; /* Index of selected entry to mount */
zfs_iter_f mnt_func;
void *mnt_data;
} mnt_param_t;
/*
* Allocate and populate the parameter struct for mount function, and
* schedule mounting of the entry selected by idx.
*/
static void
zfs_dispatch_mount(libzfs_handle_t *hdl, zfs_handle_t **handles,
size_t num_handles, int idx, zfs_iter_f func, void *data, tpool_t *tp)
{
mnt_param_t *mnt_param = zfs_alloc(hdl, sizeof (mnt_param_t));
mnt_param->mnt_hdl = hdl;
mnt_param->mnt_tp = tp;
mnt_param->mnt_zhps = handles;
mnt_param->mnt_num_handles = num_handles;
mnt_param->mnt_idx = idx;
mnt_param->mnt_func = func;
mnt_param->mnt_data = data;
(void) tpool_dispatch(tp, zfs_mount_task, (void*)mnt_param);
}
/*
* This is the structure used to keep state of mounting or sharing operations
* during a call to zpool_enable_datasets().
*/
typedef struct mount_state {
/*
* ms_mntstatus is set to -1 if any mount fails. While multiple threads
* could update this variable concurrently, no synchronization is
* needed as it's only ever set to -1.
*/
int ms_mntstatus;
int ms_mntflags;
const char *ms_mntopts;
} mount_state_t;
static int
zfs_mount_one(zfs_handle_t *zhp, void *arg)
{
mount_state_t *ms = arg;
int ret = 0;
/*
* don't attempt to mount encrypted datasets with
* unloaded keys
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE)
return (0);
if (zfs_mount(zhp, ms->ms_mntopts, ms->ms_mntflags) != 0)
ret = ms->ms_mntstatus = -1;
return (ret);
}
static int
zfs_share_one(zfs_handle_t *zhp, void *arg)
{
mount_state_t *ms = arg;
int ret = 0;
if (zfs_share(zhp) != 0)
ret = ms->ms_mntstatus = -1;
return (ret);
}
/*
* Thread pool function to mount one file system. On completion, it finds and
* schedules its children to be mounted. This depends on the sorting done in
* zfs_foreach_mountpoint(). Note that the degenerate case (chain of entries
* each descending from the previous) will have no parallelism since we always
* have to wait for the parent to finish mounting before we can schedule
* its children.
*/
static void
zfs_mount_task(void *arg)
{
mnt_param_t *mp = arg;
int idx = mp->mnt_idx;
zfs_handle_t **handles = mp->mnt_zhps;
size_t num_handles = mp->mnt_num_handles;
char mountpoint[ZFS_MAXPROPLEN];
verify(zfs_prop_get(handles[idx], ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
if (mp->mnt_func(handles[idx], mp->mnt_data) != 0)
return;
/*
* We dispatch tasks to mount filesystems with mountpoints underneath
* this one. We do this by dispatching the next filesystem with a
* descendant mountpoint of the one we just mounted, then skip all of
* its descendants, dispatch the next descendant mountpoint, and so on.
* The non_descendant_idx() function skips over filesystems that are
* descendants of the filesystem we just dispatched.
*/
for (int i = idx + 1; i < num_handles;
i = non_descendant_idx(handles, num_handles, i)) {
char child[ZFS_MAXPROPLEN];
verify(zfs_prop_get(handles[i], ZFS_PROP_MOUNTPOINT,
child, sizeof (child), NULL, NULL, 0, B_FALSE) == 0);
if (!libzfs_path_contains(mountpoint, child))
break; /* not a descendant, return */
zfs_dispatch_mount(mp->mnt_hdl, handles, num_handles, i,
mp->mnt_func, mp->mnt_data, mp->mnt_tp);
}
free(mp);
}
/*
* Issue the func callback for each ZFS handle contained in the handles
* array. This function is used to mount all datasets, and so this function
* guarantees that filesystems for parent mountpoints are called before their
* children. As such, before issuing any callbacks, we first sort the array
* of handles by mountpoint.
*
* Callbacks are issued in one of two ways:
*
* 1. Sequentially: If the parallel argument is B_FALSE or the ZFS_SERIAL_MOUNT
* environment variable is set, then we issue callbacks sequentially.
*
* 2. In parallel: If the parallel argument is B_TRUE and the ZFS_SERIAL_MOUNT
* environment variable is not set, then we use a tpool to dispatch threads
* to mount filesystems in parallel. This function dispatches tasks to mount
* the filesystems at the top-level mountpoints, and these tasks in turn
* are responsible for recursively mounting filesystems in their children
* mountpoints.
*/
void
zfs_foreach_mountpoint(libzfs_handle_t *hdl, zfs_handle_t **handles,
size_t num_handles, zfs_iter_f func, void *data, boolean_t parallel)
{
zoneid_t zoneid = getzoneid();
/*
* The ZFS_SERIAL_MOUNT environment variable is an undocumented
* variable that can be used as a convenience to do a/b comparison
* of serial vs. parallel mounting.
*/
boolean_t serial_mount = !parallel ||
(getenv("ZFS_SERIAL_MOUNT") != NULL);
/*
* Sort the datasets by mountpoint. See mountpoint_cmp for details
* of how these are sorted.
*/
qsort(handles, num_handles, sizeof (zfs_handle_t *), mountpoint_cmp);
if (serial_mount) {
for (int i = 0; i < num_handles; i++) {
func(handles[i], data);
}
return;
}
/*
* Issue the callback function for each dataset using a parallel
* algorithm that uses a thread pool to manage threads.
*/
tpool_t *tp = tpool_create(1, mount_tp_nthr, 0, NULL);
/*
* There may be multiple "top level" mountpoints outside of the pool's
* root mountpoint, e.g.: /foo /bar. Dispatch a mount task for each of
* these.
*/
for (int i = 0; i < num_handles;
i = non_descendant_idx(handles, num_handles, i)) {
/*
* Since the mountpoints have been sorted so that the zoned
* filesystems are at the end, a zoned filesystem seen from
* the global zone means that we're done.
*/
if (zoneid == GLOBAL_ZONEID &&
zfs_prop_get_int(handles[i], ZFS_PROP_ZONED))
break;
zfs_dispatch_mount(hdl, handles, num_handles, i, func, data,
tp);
}
tpool_wait(tp); /* wait for all scheduled mounts to complete */
tpool_destroy(tp);
}
/*
* Mount and share all datasets within the given pool. This assumes that no
* datasets within the pool are currently mounted.
*/
int
zpool_enable_datasets(zpool_handle_t *zhp, const char *mntopts, int flags)
{
get_all_cb_t cb = { 0 };
mount_state_t ms = { 0 };
zfs_handle_t *zfsp;
int ret = 0;
if ((zfsp = zfs_open(zhp->zpool_hdl, zhp->zpool_name,
ZFS_TYPE_DATASET)) == NULL)
goto out;
/*
* Gather all non-snapshot datasets within the pool. Start by adding
* the root filesystem for this pool to the list, and then iterate
* over all child filesystems.
*/
libzfs_add_handle(&cb, zfsp);
if (zfs_iter_filesystems(zfsp, zfs_iter_cb, &cb) != 0)
goto out;
/*
* Mount all filesystems
*/
ms.ms_mntopts = mntopts;
ms.ms_mntflags = flags;
zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
zfs_mount_one, &ms, B_TRUE);
if (ms.ms_mntstatus != 0)
ret = ms.ms_mntstatus;
/*
* Share all filesystems that need to be shared. This needs to be
* a separate pass because libshare is not mt-safe, and so we need
* to share serially.
*/
ms.ms_mntstatus = 0;
zfs_foreach_mountpoint(zhp->zpool_hdl, cb.cb_handles, cb.cb_used,
zfs_share_one, &ms, B_FALSE);
if (ms.ms_mntstatus != 0)
ret = ms.ms_mntstatus;
else
zfs_commit_all_shares();
out:
for (int i = 0; i < cb.cb_used; i++)
zfs_close(cb.cb_handles[i]);
free(cb.cb_handles);
return (ret);
}
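/*
 * Illustrative sketch (assumption, not part of this change): a caller such
 * as 'zpool import' would mount and share a pool's datasets roughly like
 * this after importing it. The pool name is hypothetical.
 *
 *	zpool_handle_t *php = zpool_open(hdl, "tank");
 *	if (php != NULL) {
 *		if (zpool_enable_datasets(php, NULL, 0) != 0)
 *			(void) fprintf(stderr, "%s\n",
 *			    libzfs_error_description(hdl));
 *		zpool_close(php);
 *	}
 */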
struct sets_s {
char *mountpoint;
zfs_handle_t *dataset;
};
static int
mountpoint_compare(const void *a, const void *b)
{
const struct sets_s *mounta = (struct sets_s *)a;
const struct sets_s *mountb = (struct sets_s *)b;
return (strcmp(mountb->mountpoint, mounta->mountpoint));
}
/*
* Unshare and unmount all datasets within the given pool. We don't want to
* rely on traversing the DSL to discover the filesystems within the pool,
* because this may be expensive (if not all of them are mounted), and can fail
* arbitrarily (on I/O error, for example). Instead, we walk /proc/self/mounts
* and gather all the filesystems that are currently mounted.
*/
int
zpool_disable_datasets(zpool_handle_t *zhp, boolean_t force)
{
int used, alloc;
FILE *mnttab;
struct mnttab entry;
size_t namelen;
struct sets_s *sets = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
int i;
int ret = -1;
int flags = (force ? MS_FORCE : 0);
namelen = strlen(zhp->zpool_name);
if ((mnttab = fopen(MNTTAB, "re")) == NULL)
return (ENOENT);
used = alloc = 0;
while (getmntent(mnttab, &entry) == 0) {
/*
* Ignore non-ZFS entries.
*/
if (entry.mnt_fstype == NULL ||
strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/*
* Ignore filesystems not within this pool.
*/
if (entry.mnt_mountp == NULL ||
strncmp(entry.mnt_special, zhp->zpool_name, namelen) != 0 ||
(entry.mnt_special[namelen] != '/' &&
entry.mnt_special[namelen] != '\0'))
continue;
/*
* At this point we've found a filesystem within our pool. Add
* it to our growing list.
*/
if (used == alloc) {
if (alloc == 0) {
if ((sets = zfs_alloc(hdl,
8 * sizeof (struct sets_s))) == NULL)
goto out;
alloc = 8;
} else {
void *ptr;
if ((ptr = zfs_realloc(hdl, sets,
alloc * sizeof (struct sets_s),
alloc * 2 * sizeof (struct sets_s)))
== NULL)
goto out;
sets = ptr;
alloc *= 2;
}
}
if ((sets[used].mountpoint = zfs_strdup(hdl,
entry.mnt_mountp)) == NULL)
goto out;
/*
* This is allowed to fail, in case there is some I/O error. It
* is only used to determine if we need to remove the underlying
* mountpoint, so failure is not fatal.
*/
sets[used].dataset = make_dataset_handle(hdl,
entry.mnt_special);
used++;
}
/*
* At this point, we have the entire list of filesystems, so sort it by
* mountpoint.
*/
qsort(sets, used, sizeof (struct sets_s), mountpoint_compare);
/*
* Walk through and first unshare everything.
*/
for (i = 0; i < used; i++) {
zfs_share_proto_t *curr_proto;
for (curr_proto = share_all_proto; *curr_proto != PROTO_END;
curr_proto++) {
if (is_shared(sets[i].mountpoint, *curr_proto) &&
unshare_one(hdl, sets[i].mountpoint,
sets[i].mountpoint, *curr_proto) != 0)
goto out;
}
}
zfs_commit_all_shares();
/*
* Now unmount everything, removing the underlying directories as
* appropriate.
*/
for (i = 0; i < used; i++) {
if (unmount_one(sets[i].dataset, sets[i].mountpoint,
flags) != 0)
goto out;
}
for (i = 0; i < used; i++) {
if (sets[i].dataset)
remove_mountpoint(sets[i].dataset);
}
+ zpool_disable_datasets_os(zhp, force);
+
ret = 0;
out:
(void) fclose(mnttab);
for (i = 0; i < used; i++) {
if (sets[i].dataset)
zfs_close(sets[i].dataset);
free(sets[i].mountpoint);
}
free(sets);
return (ret);
}
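/*
 * Illustrative sketch (assumption, not part of this change): 'zpool export'
 * unshares and unmounts everything in a pool via this function before
 * exporting it; 'force' corresponds to the -f flag.
 *
 *	if (zpool_disable_datasets(php, B_FALSE) != 0)
 *		return (-1);
 */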
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
index 7338b9d72cad..57dea98cc7d9 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_pool.c
@@ -1,4950 +1,4950 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_sysfs.h>
#include <sys/vdev_disk.h>
#include <sys/types.h>
#include <dlfcn.h>
#include <libzutil.h>
#include <fcntl.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static boolean_t zpool_vdev_is_interior(const char *name);
typedef struct prop_flags {
int create:1; /* Validate property on creation */
int import:1; /* Validate property on import */
} prop_flags_t;
/*
* ====================================================================
* zpool property functions
* ====================================================================
*/
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
return (-1);
while (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
} else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zcmd_free_nvlists(&zc);
return (0);
}
int
zpool_props_refresh(zpool_handle_t *zhp)
{
nvlist_t *old_props;
old_props = zhp->zpool_props;
if (zpool_get_all_props(zhp) != 0)
return (-1);
nvlist_free(old_props);
return (0);
}
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
zprop_source_t *src)
{
nvlist_t *nv, *nvl;
uint64_t ival;
char *value;
zprop_source_t source;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
source = ival;
verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
} else {
source = ZPROP_SRC_DEFAULT;
if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
value = "-";
}
if (src)
*src = source;
return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
nvlist_t *nv, *nvl;
uint64_t value;
zprop_source_t source;
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
/*
* zpool_get_all_props() has most likely failed because
* the pool is faulted, but if all we need is the top level
* vdev's guid then get it from the zhp config nvlist.
*/
if ((prop == ZPOOL_PROP_GUID) &&
(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
== 0)) {
return (value);
}
return (zpool_prop_default_numeric(prop));
}
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
source = value;
verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
} else {
source = ZPROP_SRC_DEFAULT;
value = zpool_prop_default_numeric(prop);
}
if (src)
*src = source;
return (value);
}
/*
* Map VDEV STATE to printed strings.
*/
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return (gettext("OFFLINE"));
case VDEV_STATE_REMOVED:
return (gettext("REMOVED"));
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return (gettext("FAULTED"));
else if (aux == VDEV_AUX_SPLIT_POOL)
return (gettext("SPLIT"));
else
return (gettext("UNAVAIL"));
case VDEV_STATE_FAULTED:
return (gettext("FAULTED"));
case VDEV_STATE_DEGRADED:
return (gettext("DEGRADED"));
case VDEV_STATE_HEALTHY:
return (gettext("ONLINE"));
default:
break;
}
return (gettext("UNKNOWN"));
}
/*
* Map POOL STATE to printed strings.
*/
const char *
zpool_pool_state_to_name(pool_state_t state)
{
switch (state) {
default:
break;
case POOL_STATE_ACTIVE:
return (gettext("ACTIVE"));
case POOL_STATE_EXPORTED:
return (gettext("EXPORTED"));
case POOL_STATE_DESTROYED:
return (gettext("DESTROYED"));
case POOL_STATE_SPARE:
return (gettext("SPARE"));
case POOL_STATE_L2CACHE:
return (gettext("L2CACHE"));
case POOL_STATE_UNINITIALIZED:
return (gettext("UNINITIALIZED"));
case POOL_STATE_UNAVAIL:
return (gettext("UNAVAIL"));
case POOL_STATE_POTENTIALLY_ACTIVE:
return (gettext("POTENTIALLY_ACTIVE"));
}
return (gettext("UNKNOWN"));
}
/*
* Given a pool handle, return the pool health string ("ONLINE", "DEGRADED",
* "SUSPENDED", etc).
*/
const char *
zpool_get_state_str(zpool_handle_t *zhp)
{
zpool_errata_t errata;
zpool_status_t status;
nvlist_t *nvroot;
vdev_stat_t *vs;
uint_t vsc;
const char *str;
status = zpool_get_status(zhp, NULL, &errata);
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
str = gettext("FAULTED");
} else if (status == ZPOOL_STATUS_IO_FAILURE_WAIT ||
status == ZPOOL_STATUS_IO_FAILURE_MMP) {
str = gettext("SUSPENDED");
} else {
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
== 0);
str = zpool_state_to_name(vs->vs_state, vs->vs_aux);
}
return (str);
}
/*
* Get a zpool property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
size_t len, zprop_source_t *srctype, boolean_t literal)
{
uint64_t intval;
const char *strval;
zprop_source_t src = ZPROP_SRC_NONE;
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
switch (prop) {
case ZPOOL_PROP_NAME:
(void) strlcpy(buf, zpool_get_name(zhp), len);
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_GUID:
intval = zpool_get_prop_int(zhp, prop, &src);
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
break;
case ZPOOL_PROP_ALTROOT:
case ZPOOL_PROP_CACHEFILE:
case ZPOOL_PROP_COMMENT:
case ZPOOL_PROP_COMPATIBILITY:
if (zhp->zpool_props != NULL ||
zpool_get_all_props(zhp) == 0) {
(void) strlcpy(buf,
zpool_get_prop_string(zhp, prop, &src),
len);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
default:
(void) strlcpy(buf, "-", len);
break;
}
if (srctype != NULL)
*srctype = src;
return (0);
}
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
prop != ZPOOL_PROP_NAME)
return (-1);
switch (zpool_prop_get_type(prop)) {
case PROP_TYPE_STRING:
(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
len);
break;
case PROP_TYPE_NUMBER:
intval = zpool_get_prop_int(zhp, prop, &src);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_ALLOCATED:
case ZPOOL_PROP_FREE:
case ZPOOL_PROP_FREEING:
case ZPOOL_PROP_LEAKED:
case ZPOOL_PROP_ASHIFT:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
else
(void) zfs_nicenum(intval, buf, len);
break;
case ZPOOL_PROP_EXPANDSZ:
case ZPOOL_PROP_CHECKPOINT:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicebytes(intval, buf, len);
}
break;
case ZPOOL_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_DEDUPRATIO:
if (literal)
(void) snprintf(buf, len, "%llu.%02llu",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
else
(void) snprintf(buf, len, "%llu.%02llux",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, zpool_get_state_str(zhp), len);
break;
case ZPOOL_PROP_VERSION:
if (intval >= SPA_VERSION_FEATURES) {
(void) snprintf(buf, len, "-");
break;
}
- /* FALLTHROUGH */
+ fallthrough;
default:
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
intval = zpool_get_prop_int(zhp, prop, &src);
if (zpool_prop_index_to_string(prop, intval, &strval)
!= 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
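/*
 * Illustrative sketch (assumption, not part of this change): reading the
 * pool health as a string with a caller-supplied buffer.
 *
 *	char buf[ZFS_MAXPROPLEN];
 *	if (zpool_get_prop(zhp, ZPOOL_PROP_HEALTH, buf, sizeof (buf),
 *	    NULL, B_FALSE) == 0)
 *		(void) printf("health: %s\n", buf);
 */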
/*
* Check if the bootfs name has the same pool name as it is set to.
* Assuming bootfs is a valid dataset name.
*/
static boolean_t
bootfs_name_valid(const char *pool, const char *bootfs)
{
int len = strlen(pool);
if (bootfs[0] == '\0')
return (B_TRUE);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
return (B_FALSE);
if (strncmp(pool, bootfs, len) == 0 &&
(bootfs[len] == '/' || bootfs[len] == '\0'))
return (B_TRUE);
return (B_FALSE);
}
/*
* Given an nvlist of zpool properties to be set, validate that they are
* correct, and parse any numeric properties (index, boolean, etc) if they are
* specified as strings.
*/
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
nvpair_t *elem;
nvlist_t *retprops;
zpool_prop_t prop;
char *strval;
uint64_t intval;
char *slash, *check;
struct stat64 statbuf;
zpool_handle_t *zhp;
char report[1024];
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zpool_name_to_prop(propname);
if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
int err;
char *fname = strchr(propname, '@') + 1;
err = zfeature_lookup_name(fname, NULL);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"feature '%s' unsupported by kernel"),
fname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'enabled' or 'disabled'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!flags.create &&
strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'disabled' at creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvlist_add_uint64(retprops, propname, 0) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Make sure this property is valid and applies to this type.
*/
if (prop == ZPOOL_PROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zpool_prop_readonly(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (!flags.create && zpool_prop_setonce(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform additional checking for specific properties.
*/
switch (prop) {
case ZPOOL_PROP_VERSION:
if (intval < version ||
!SPA_VERSION_IS_SUPPORTED(intval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid."),
propname, (unsigned long long)intval);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
break;
case ZPOOL_PROP_ASHIFT:
if (intval != 0 &&
(intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %llu is invalid, "
"only values between %" PRId32 " and %"
PRId32 " are allowed."),
propname, (unsigned long long)intval,
ASHIFT_MIN, ASHIFT_MAX);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_BOOTFS:
if (flags.create || flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' cannot be set at creation "
"or import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (version < SPA_VERSION_BOOTFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support "
"'%s' property"), propname);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
/*
 * The bootfs property value has to be a dataset name, and
 * the dataset has to be in the same pool it is set on.
 */
if (!bootfs_name_valid(poolname, strval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto error;
}
if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"could not open pool '%s'"), poolname);
(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
goto error;
}
zpool_close(zhp);
break;
case ZPOOL_PROP_ALTROOT:
if (!flags.create && !flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set during pool "
"creation or import"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad alternate root '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
break;
case ZPOOL_PROP_CACHEFILE:
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be empty, an "
"absolute path, or 'none'"), propname);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
slash = strrchr(strval, '/');
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid file"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*slash = '\0';
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*slash = '/';
break;
case ZPOOL_PROP_COMPATIBILITY:
switch (zpool_load_compat(strval, NULL, report, 1024)) {
case ZPOOL_COMPATIBILITY_OK:
case ZPOOL_COMPATIBILITY_WARNTOKEN:
break;
case ZPOOL_COMPATIBILITY_BADFILE:
case ZPOOL_COMPATIBILITY_BADTOKEN:
case ZPOOL_COMPATIBILITY_NOFILES:
zfs_error_aux(hdl, "%s", report);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_COMMENT:
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"comment may only have printable "
"characters"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"comment must not exceed %d characters"),
ZPROP_MAX_COMMENT);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_READONLY:
if (!flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_MULTIHOST:
if (get_system_hostid() == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"requires a non-zero system hostid"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_DEDUPDITTO:
printf("Note: property '%s' no longer has "
"any effect\n", propname);
break;
default:
break;
}
}
return (retprops);
error:
nvlist_free(retprops);
return (NULL);
}
/*
* Set zpool property : propname=propval.
*/
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
char errbuf[1024];
nvlist_t *nvl = NULL;
nvlist_t *realprops;
uint64_t version;
prop_flags_t flags = { 0 };
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zpool_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_add_string(nvl, propname, propval) != 0) {
nvlist_free(nvl);
return (no_memory(zhp->zpool_hdl));
}
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
nvlist_free(nvl);
return (-1);
}
nvlist_free(nvl);
nvl = realprops;
/*
* Execute the corresponding ioctl() to set this property.
*/
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
nvlist_free(nvl);
return (-1);
}
ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
zcmd_free_nvlists(&zc);
nvlist_free(nvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
else
(void) zpool_props_refresh(zhp);
return (ret);
}
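/*
 * Illustrative sketch (assumption, not part of this change): setting a
 * pool property by name, mirroring what 'zpool set' does. The property
 * and value are hypothetical.
 *
 *	if (zpool_set_prop(zhp, "autotrim", "on") != 0)
 *		(void) fprintf(stderr, "%s\n",
 *		    libzfs_error_description(zpool_get_handle(zhp)));
 */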
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp,
boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
nvlist_t *features = NULL;
nvpair_t *nvp;
zprop_list_t **last;
boolean_t firstexpand = (NULL == *plp);
int i;
if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
return (-1);
last = plp;
while (*last != NULL)
last = &(*last)->pl_next;
if ((*plp)->pl_all)
features = zpool_get_features(zhp);
if ((*plp)->pl_all && firstexpand) {
for (i = 0; i < SPA_FEATURES; i++) {
zprop_list_t *entry = zfs_alloc(hdl,
sizeof (zprop_list_t));
entry->pl_prop = ZPROP_INVAL;
entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
spa_feature_table[i].fi_uname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
/* add any unsupported features */
for (nvp = nvlist_next_nvpair(features, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
char *propname;
boolean_t found;
zprop_list_t *entry;
if (zfeature_is_supported(nvpair_name(nvp)))
continue;
propname = zfs_asprintf(hdl, "unsupported@%s",
nvpair_name(nvp));
/*
 * Before adding the property to the list, make sure that no
 * other pool already added the same property.
 */
found = B_FALSE;
entry = *plp;
while (entry != NULL) {
if (entry->pl_user_prop != NULL &&
strcmp(propname, entry->pl_user_prop) == 0) {
found = B_TRUE;
break;
}
entry = entry->pl_next;
}
if (found) {
free(propname);
continue;
}
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_INVAL;
entry->pl_user_prop = propname;
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_INVAL &&
zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
NULL, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
}
return (0);
}
/*
* Get the state for the given feature on the given ZFS pool.
*/
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
size_t len)
{
uint64_t refcount;
boolean_t found = B_FALSE;
nvlist_t *features = zpool_get_features(zhp);
boolean_t supported;
const char *feature = strchr(propname, '@') + 1;
supported = zpool_prop_feature(propname);
ASSERT(supported || zpool_prop_unsupported(propname));
/*
* Convert from feature name to feature guid. This conversion is
* unnecessary for unsupported@... properties because they already
* use guids.
*/
if (supported) {
int ret;
spa_feature_t fid;
ret = zfeature_lookup_name(feature, &fid);
if (ret != 0) {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
feature = spa_feature_table[fid].fi_guid;
}
if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
found = B_TRUE;
if (supported) {
if (!found) {
(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
} else {
if (refcount == 0)
(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
else
(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
}
} else {
if (found) {
if (refcount == 0) {
(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
} else {
(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
}
} else {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
}
return (0);
}
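/*
 * Illustrative sketch (assumption, not part of this change): querying a
 * feature's state by its property name; the result is one of "disabled",
 * "enabled", or "active".
 *
 *	char state[64];
 *	if (zpool_prop_get_feature(zhp, "feature@async_destroy", state,
 *	    sizeof (state)) == 0)
 *		(void) printf("async_destroy: %s\n", state);
 */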
/*
 * Validate the given pool name, optionally setting an extended error
 * message on the libzfs handle.
 */
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
namecheck_err_t why;
char what;
int ret;
ret = pool_namecheck(pool, &why, &what);
/*
* The rules for reserved pool names were extended at a later point.
* But we need to support users with existing pools that may now be
* invalid. So we only check for this expanded set of names during a
* create (or import), and only in userland.
*/
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
strncmp(pool, "draid", 5) == 0 ||
strncmp(pool, "spare", 5) == 0 ||
strcmp(pool, "log") == 0)) {
if (hdl != NULL)
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is reserved"));
return (B_FALSE);
}
if (ret != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is too long"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in pool name"), what);
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name must begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool name is reserved"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NO_AT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"permission set is missing '@'"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Open a handle to the given pool, even if the pool is currently in the FAULTED
* state.
*/
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
boolean_t missing;
/*
* Make sure the pool name is valid.
*/
if (!zpool_name_valid(hdl, B_TRUE, pool)) {
(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot open '%s'"),
pool);
return (NULL);
}
if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
return (NULL);
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (NULL);
}
if (missing) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
(void) zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Like the above, but silent on error. Used when iterating over pools (because
* the configuration cache may be out of date).
*/
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
zpool_handle_t *zhp;
boolean_t missing;
if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
return (-1);
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (-1);
}
if (missing) {
zpool_close(zhp);
*ret = NULL;
return (0);
}
*ret = zhp;
return (0);
}
/*
* Similar to zpool_open_canfail(), but refuses to open pools in the faulted
* state.
*/
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
return (NULL);
if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Close the handle. Simply frees the memory associated with the handle.
*/
void
zpool_close(zpool_handle_t *zhp)
{
nvlist_free(zhp->zpool_config);
nvlist_free(zhp->zpool_old_config);
nvlist_free(zhp->zpool_props);
free(zhp);
}
/*
* Return the name of the pool.
*/
const char *
zpool_get_name(zpool_handle_t *zhp)
{
return (zhp->zpool_name);
}
/*
* Return the state of the pool (ACTIVE or UNAVAILABLE)
*/
int
zpool_get_state(zpool_handle_t *zhp)
{
return (zhp->zpool_state);
}
/*
* Check if vdev list contains a special vdev
*/
static boolean_t
zpool_has_special_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
for (uint_t c = 0; c < children; c++) {
char *bias;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0 &&
strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Check if vdev list contains a dRAID vdev
*/
static boolean_t
zpool_has_draid_vdev(nvlist_t *nvroot)
{
nvlist_t **child;
uint_t children;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (uint_t c = 0; c < children; c++) {
char *type;
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_TYPE, &type) == 0 &&
strcmp(type, VDEV_TYPE_DRAID) == 0) {
return (B_TRUE);
}
}
}
return (B_FALSE);
}
/*
* Output a dRAID top-level vdev name in to the provided buffer.
*/
static char *
zpool_draid_name(char *name, int len, uint64_t data, uint64_t parity,
uint64_t spares, uint64_t children)
{
snprintf(name, len, "%s%llu:%llud:%lluc:%llus",
VDEV_TYPE_DRAID, (u_longlong_t)parity, (u_longlong_t)data,
(u_longlong_t)children, (u_longlong_t)spares);
return (name);
}
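/*
 * Example (illustrative, not part of this change): a single-parity dRAID
 * with 6 data disks, 8 children and 1 distributed spare yields the name
 * "draid1:6d:8c:1s".
 */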
/*
* Return B_TRUE if the provided name is a dRAID spare name.
*/
boolean_t
zpool_is_draid_spare(const char *name)
{
uint64_t spare_id, parity, vdev_id;
if (sscanf(name, VDEV_TYPE_DRAID "%llu-%llu-%llu",
(u_longlong_t *)&parity, (u_longlong_t *)&vdev_id,
(u_longlong_t *)&spare_id) == 3) {
return (B_TRUE);
}
return (B_FALSE);
}
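/*
 * Examples (illustrative, not part of this change):
 *
 *	zpool_is_draid_spare("draid1-0-0") -> B_TRUE  (parity 1, vdev 0, spare 0)
 *	zpool_is_draid_spare("sda")        -> B_FALSE
 */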
/*
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
* don't have to worry about error semantics.
*/
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
nvlist_t *props, nvlist_t *fsprops)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zc_fsprops = NULL;
nvlist_t *zc_props = NULL;
nvlist_t *hidden_args = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char msg[1024];
int ret = -1;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
if (!zpool_name_valid(hdl, B_FALSE, pool))
return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
if (props) {
prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
if ((zc_props = zpool_valid_proplist(hdl, pool, props,
SPA_VERSION_1, flags, msg)) == NULL) {
goto create_failed;
}
}
if (fsprops) {
uint64_t zoned;
char *zonestr;
zoned = ((nvlist_lookup_string(fsprops,
zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
strcmp(zonestr, "on") == 0);
if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
goto create_failed;
}
if (nvlist_exists(zc_fsprops,
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS)) &&
!zpool_has_special_vdev(nvroot)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"%s property requires a special vdev"),
zfs_prop_to_name(ZFS_PROP_SPECIAL_SMALL_BLOCKS));
(void) zfs_error(hdl, EZFS_BADPROP, msg);
goto create_failed;
}
if (!zc_props &&
(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
goto create_failed;
}
if (zfs_crypto_create(hdl, NULL, zc_fsprops, props, B_TRUE,
&wkeydata, &wkeylen) != 0) {
zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
goto create_failed;
}
if (nvlist_add_nvlist(zc_props,
ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
goto create_failed;
}
if (wkeydata != NULL) {
if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
goto create_failed;
if (nvlist_add_uint8_array(hidden_args, "wkeydata",
wkeydata, wkeylen) != 0)
goto create_failed;
if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
hidden_args) != 0)
goto create_failed;
}
}
if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
goto create_failed;
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label. This can also happen if the device is
* part of an active md or lvm device.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device, or "
"one of\nthe devices is part of an active md or "
"lvm device"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
case ERANGE:
/*
* This happens if the record size is smaller or larger
* than the allowed size range, or not a power of 2.
*
* NOTE: although zfs_valid_proplist is called earlier,
* this case may have slipped through since the
* pool does not exist yet and it is therefore
* impossible to read properties e.g. max blocksize
* from the pool.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"record size invalid"));
return (zfs_error(hdl, EZFS_BADPROP, msg));
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is less than the "
"minimum size (%s)"), buf);
}
return (zfs_error(hdl, EZFS_BADDEV, msg));
case ENOSPC:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is out of space"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
} else {
return (zpool_standard_error(hdl, errno, msg));
}
default:
return (zpool_standard_error(hdl, errno, msg));
}
}
create_failed:
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
return (ret);
}
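/*
 * A minimal usage sketch for zpool_create(): the vdev tree ('nvroot') is
 * assumed to have been built elsewhere (the zpool command normally does
 * this); here only a filesystem property list is supplied for the root
 * dataset.
 */
static int
example_create_pool(libzfs_handle_t *hdl, const char *name, nvlist_t *nvroot)
{
	nvlist_t *fsprops = fnvlist_alloc();
	int err;

	/* Enable compression on the pool's root dataset. */
	fnvlist_add_string(fsprops,
	    zfs_prop_to_name(ZFS_PROP_COMPRESSION), "on");

	err = zpool_create(hdl, name, nvroot, NULL, fsprops);
	fnvlist_free(fsprops);
	return (err);
}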
/*
* Destroy the given pool. It is up to the caller to ensure that there are no
* datasets left in the pool.
*/
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zfp = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
if (zhp->zpool_state == POOL_STATE_ACTIVE &&
(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
if (errno == EROFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
} else {
(void) zpool_standard_error(hdl, errno, msg);
}
if (zfp)
zfs_close(zfp);
return (-1);
}
if (zfp) {
remove_mountpoint(zfp);
zfs_close(zfp);
}
return (0);
}
/*
* Create a checkpoint in the given pool.
*/
int
zpool_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
int error;
error = lzc_pool_checkpoint(zhp->zpool_name);
if (error != 0) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot checkpoint '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, msg);
return (-1);
}
return (0);
}
/*
* Discard the checkpoint from the given pool.
*/
int
zpool_discard_checkpoint(zpool_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
int error;
error = lzc_pool_checkpoint_discard(zhp->zpool_name);
if (error != 0) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot discard checkpoint in '%s'"), zhp->zpool_name);
(void) zpool_standard_error(hdl, error, msg);
return (-1);
}
return (0);
}
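/*
 * Usage sketch for the checkpoint calls above: assuming the handle comes
 * from the usual libzfs_init()/zpool_open() pair, a checkpoint can be taken
 * before a risky operation and discarded once it is known to be safe.
 */
static int
example_checkpoint_roundtrip(libzfs_handle_t *hdl, const char *pool)
{
	zpool_handle_t *zhp = zpool_open(hdl, pool);

	if (zhp == NULL)
		return (-1);
	if (zpool_checkpoint(zhp) != 0) {
		zpool_close(zhp);
		return (-1);
	}
	/* ... perform the risky change here ... */
	(void) zpool_discard_checkpoint(zhp);
	zpool_close(zhp);
	return (0);
}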
/*
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
*/
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
zfs_cmd_t zc = {"\0"};
int ret;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot add to '%s'"), zhp->zpool_name);
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add hot spares"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_L2CACHE &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add cache devices"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
}
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case EINVAL:
if (zpool_has_draid_vdev(nvroot) &&
zfeature_lookup_name("draid", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID vdevs are unsupported by the "
"kernel"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; a pool with removing/"
"removed vdevs does not support adding "
"raidz or dRAID vdevs"));
}
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is less than the minimum "
"size (%s)"), buf);
}
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to add these vdevs"));
(void) zfs_error(hdl, EZFS_BADVERSION, msg);
break;
default:
(void) zpool_standard_error(hdl, errno, msg);
}
ret = -1;
} else {
ret = 0;
}
zcmd_free_nvlists(&zc);
return (ret);
}
/*
* Exports the pool from the system. The caller must ensure that there are no
* mounted datasets in the pool.
*/
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
const char *log_str)
{
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = force;
zc.zc_guid = hardforce;
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"use '-f' to override the following errors:\n"
"'%s' has an active shared spare which could be"
" used by other pools once '%s' is exported."),
zhp->zpool_name, zhp->zpool_name);
return (zfs_error_fmt(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
default:
return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot export '%s'"),
zhp->zpool_name));
}
}
return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
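/*
 * Usage sketch for the export wrappers above: zpool_export() performs a
 * normal (optionally forced) export, while zpool_export_force() also sets
 * the hard-force flag.  The log string is recorded in the pool history.
 */
static int
example_export_pool(zpool_handle_t *zhp, boolean_t hardforce)
{
	if (hardforce)
		return (zpool_export_force(zhp, "example hard-forced export"));
	return (zpool_export(zhp, B_FALSE, "example export"));
}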
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
nvlist_t *config)
{
nvlist_t *nv = NULL;
uint64_t rewindto;
int64_t loss = -1;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr || config == NULL)
return;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
return;
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
return;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
if (dryrun) {
(void) printf(dgettext(TEXT_DOMAIN,
"Would be able to return %s "
"to its state as of %s.\n"),
name, timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"Pool %s returned to its state as of %s.\n"),
name, timestr);
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
((longlong_t)loss + 30) / 60);
(void) printf(dgettext(TEXT_DOMAIN,
"minutes of transactions.\n"));
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
(longlong_t)loss);
(void) printf(dgettext(TEXT_DOMAIN,
"seconds of transactions.\n"));
}
}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
nvlist_t *config)
{
nvlist_t *nv = NULL;
int64_t loss = -1;
uint64_t edata = UINT64_MAX;
uint64_t rewindto;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr)
return;
if (reason >= 0)
(void) printf(dgettext(TEXT_DOMAIN, "action: "));
else
(void) printf(dgettext(TEXT_DOMAIN, "\t"));
/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME is missing */
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
goto no_info;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
&edata);
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery is possible, but will result in some data loss.\n"));
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReturning the pool to its state as of %s\n"
"\tshould correct the problem. "),
timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReverting the pool to an earlier state "
"should correct the problem.\n\t"));
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld minutes of data\n"
"\tmust be discarded, irreversibly. "),
((longlong_t)loss + 30) / 60);
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld seconds of data\n"
"\tmust be discarded, irreversibly. "),
(longlong_t)loss);
}
if (edata != 0 && edata != UINT64_MAX) {
if (edata == 1) {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, at least\n"
"\tone persistent user-data error will remain. "));
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, several\n"
"\tpersistent user-data errors will remain. "));
}
}
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
reason >= 0 ? "clear" : "import", name);
(void) printf(dgettext(TEXT_DOMAIN,
"A scrub of the pool\n"
"\tis strongly recommended after recovery.\n"));
return;
no_info:
(void) printf(dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
* zpool_import() is a contracted interface. It should be kept the same
* if possible.
*
* Applications should use zpool_import_props() to import a pool with
* new property values to be set.
*/
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
char *altroot)
{
nvlist_t *props = NULL;
int ret;
if (altroot != NULL) {
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
if (nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
nvlist_free(props);
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
}
ret = zpool_import_props(hdl, config, newname, props,
ZFS_IMPORT_NORMAL);
nvlist_free(props);
return (ret);
}
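/*
 * Usage sketch for zpool_import(): the config nvlist is expected to come
 * from a discovery routine such as zpool_find_import(); the altroot (a
 * placeholder path here) is applied through a temporary property list by
 * the wrapper above.
 */
static int
example_import_under_altroot(libzfs_handle_t *hdl, nvlist_t *config)
{
	char altroot[] = "/mnt/recovery";	/* hypothetical altroot */

	return (zpool_import(hdl, config, NULL, altroot));
}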
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
int indent)
{
nvlist_t **child;
uint_t c, children;
char *vname;
uint64_t is_log = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
&is_log);
if (name != NULL)
(void) printf("\t%*s%s%s\n", indent, "", name,
is_log ? " [log]" : "");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
print_vdev_tree(hdl, vname, child[c], indent + 2);
free(vname);
}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
nvlist_t *nvinfo, *unsup_feat;
nvpair_t *nvp;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
0);
verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
&unsup_feat) == 0);
for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
char *desc;
verify(nvpair_type(nvp) == DATA_TYPE_STRING);
verify(nvpair_value_string(nvp, &desc) == 0);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
else
(void) printf("\t%s\n", nvpair_name(nvp));
}
}
/*
* Import the given pool using the known configuration and a list of
* properties to be set. The configuration should have come from
* zpool_find_import(). The 'newname' parameter controls whether the pool
* is imported with a different name.
*/
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props, int flags)
{
zfs_cmd_t zc = {"\0"};
zpool_load_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
char *thename;
char *origname;
int ret;
int error = 0;
char errbuf[1024];
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&origname) == 0);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot import pool '%s'"), origname);
if (newname != NULL) {
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
thename = (char *)newname;
} else {
thename = origname;
}
if (props != NULL) {
uint64_t version;
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if ((props = zpool_valid_proplist(hdl, origname,
props, version, flags, errbuf)) == NULL)
return (-1);
if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
nvlist_free(props);
return (-1);
}
nvlist_free(props);
}
(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&zc.zc_guid) == 0);
if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zc.zc_cookie = flags;
while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (ret != 0)
error = errno;
(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
zcmd_free_nvlists(&zc);
zpool_get_load_policy(config, &policy);
if (error) {
char desc[1024];
char aux[256];
/*
* Dry-run failed, but we print out what success
* looks like if we found a best txg
*/
if (policy.zlp_rewind & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
B_TRUE, nv);
nvlist_free(nv);
return (-1);
}
if (newname == NULL)
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
thename);
else
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
origname, thename);
switch (error) {
case ENOTSUP:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
(void) printf(dgettext(TEXT_DOMAIN, "This "
"pool uses the following feature(s) not "
"supported by this system:\n"));
zpool_print_unsup_feat(nv);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_CAN_RDONLY)) {
(void) printf(dgettext(TEXT_DOMAIN,
"All unsupported features are only "
"required for writing to the pool."
"\nThe pool can be imported using "
"'-o readonly=on'.\n"));
}
}
/*
* Unsupported version.
*/
(void) zfs_error(hdl, EZFS_BADVERSION, desc);
break;
case EREMOTEIO:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
mmp_state_t mmp_state;
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
if (mmp_state == MMP_STATE_ACTIVE) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool is imp"
"orted on host '%s' (hostid=%lx).\n"
"Export the pool on the other "
"system, then run 'zpool import'."),
hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool has "
"the multihost property on and "
"the\nsystem's hostid is not set. "
"Set a unique system hostid with "
"the zgenhostid(8) command.\n"));
}
(void) zfs_error_aux(hdl, "%s", aux);
}
(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
break;
case EROFS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENXIO:
if (nv && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_lookup_nvlist(nvinfo,
ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"The devices below are missing or "
"corrupted, use '-m' to import the pool "
"anyway:\n"));
print_vdev_tree(hdl, NULL, missing, 2);
(void) printf("\n");
}
(void) zpool_standard_error(hdl, error, desc);
break;
case EEXIST:
(void) zpool_standard_error(hdl, error, desc);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices are already in use\n"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENAMETOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new name of at least one dataset is longer than "
"the maximum allowable length"));
(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
break;
default:
(void) zpool_standard_error(hdl, error, desc);
zpool_explain_recover(hdl,
newname ? origname : thename, -error, nv);
break;
}
nvlist_free(nv);
ret = -1;
} else {
zpool_handle_t *zhp;
/*
* This should never fail, but play it safe anyway.
*/
if (zpool_open_silent(hdl, thename, &zhp) != 0)
ret = -1;
else if (zhp != NULL)
zpool_close(zhp);
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0), nv);
}
nvlist_free(nv);
return (0);
}
return (ret);
}
/*
* Translate vdev names to guids. If a vdev_path is determined to be
* unsuitable then a vd_errlist is allocated and the vdev path and errno
* are added to it.
*/
static int
zpool_translate_vdev_guids(zpool_handle_t *zhp, nvlist_t *vds,
nvlist_t *vdev_guids, nvlist_t *guids_to_paths, nvlist_t **vd_errlist)
{
nvlist_t *errlist = NULL;
int error = 0;
for (nvpair_t *elem = nvlist_next_nvpair(vds, NULL); elem != NULL;
elem = nvlist_next_nvpair(vds, elem)) {
boolean_t spare, cache;
char *vd_path = nvpair_name(elem);
nvlist_t *tgt = zpool_find_vdev(zhp, vd_path, &spare, &cache,
NULL);
if ((tgt == NULL) || cache || spare) {
if (errlist == NULL) {
errlist = fnvlist_alloc();
error = EINVAL;
}
uint64_t err = (tgt == NULL) ? EZFS_NODEVICE :
(spare ? EZFS_ISSPARE : EZFS_ISL2CACHE);
fnvlist_add_int64(errlist, vd_path, err);
continue;
}
uint64_t guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
fnvlist_add_uint64(vdev_guids, vd_path, guid);
char msg[MAXNAMELEN];
(void) snprintf(msg, sizeof (msg), "%llu", (u_longlong_t)guid);
fnvlist_add_string(guids_to_paths, msg, vd_path);
}
if (error != 0) {
verify(errlist != NULL);
if (vd_errlist != NULL)
*vd_errlist = errlist;
else
fnvlist_free(errlist);
}
return (error);
}
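/*
 * Translate an errno from the initialize request into the corresponding
 * EZFS_* error code; unrecognized values are returned unchanged.
 */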
static int
xlate_init_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_INITIALIZING);
case ESRCH:
return (EZFS_NO_INITIALIZE);
}
return (err);
}
/*
* Begin, suspend, or cancel the initialization (initializing of all free
* blocks) for the given vdevs in the given pool.
*/
static int
zpool_initialize_impl(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds, boolean_t wait)
{
int err;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *vd_errlist = NULL;
nvlist_t *errlist;
nvpair_t *elem;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &vd_errlist);
if (err != 0) {
verify(vd_errlist != NULL);
goto list_errors;
}
err = lzc_initialize(zhp->zpool_name, cmd_type,
vdev_guids, &errlist);
if (err != 0) {
if (errlist != NULL) {
vd_errlist = fnvlist_lookup_nvlist(errlist,
ZPOOL_INITIALIZE_VDEVS);
goto list_errors;
}
(void) zpool_standard_error(zhp->zpool_hdl, err,
dgettext(TEXT_DOMAIN, "operation failed"));
goto out;
}
if (wait) {
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_INITIALIZE, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting for '%s' to initialize"),
nvpair_name(elem));
goto out;
}
}
}
goto out;
list_errors:
for (elem = nvlist_next_nvpair(vd_errlist, NULL); elem != NULL;
elem = nvlist_next_nvpair(vd_errlist, elem)) {
int64_t vd_error = xlate_init_err(fnvpair_value_int64(elem));
char *path;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot initialize '%s'", path);
}
out:
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
if (vd_errlist != NULL)
fnvlist_free(vd_errlist);
return (err == 0 ? 0 : -1);
}
int
zpool_initialize(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_FALSE));
}
int
zpool_initialize_wait(zpool_handle_t *zhp, pool_initialize_func_t cmd_type,
nvlist_t *vds)
{
return (zpool_initialize_impl(zhp, cmd_type, vds, B_TRUE));
}
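/*
 * Usage sketch for zpool_initialize(): zpool_translate_vdev_guids() above
 * only consumes the nvpair names of 'vds', so the caller can pass an nvlist
 * keyed by vdev name.  The device name below is a placeholder.
 */
static int
example_start_initialize(zpool_handle_t *zhp)
{
	nvlist_t *vds = fnvlist_alloc();
	int err;

	fnvlist_add_boolean(vds, "sda");	/* placeholder vdev name */
	err = zpool_initialize(zhp, POOL_INITIALIZE_START, vds);
	fnvlist_free(vds);
	return (err);
}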
static int
xlate_trim_err(int err)
{
switch (err) {
case ENODEV:
return (EZFS_NODEVICE);
case EINVAL:
case EROFS:
return (EZFS_BADDEV);
case EBUSY:
return (EZFS_TRIMMING);
case ESRCH:
return (EZFS_NO_TRIM);
case EOPNOTSUPP:
return (EZFS_TRIM_NOTSUP);
}
return (err);
}
static int
zpool_trim_wait(zpool_handle_t *zhp, nvlist_t *vdev_guids)
{
int err;
nvpair_t *elem;
for (elem = nvlist_next_nvpair(vdev_guids, NULL); elem != NULL;
elem = nvlist_next_nvpair(vdev_guids, elem)) {
uint64_t guid = fnvpair_value_uint64(elem);
err = lzc_wait_tag(zhp->zpool_name,
ZPOOL_WAIT_TRIM, guid, NULL);
if (err != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl,
err, dgettext(TEXT_DOMAIN, "error "
"waiting to trim '%s'"), nvpair_name(elem));
return (err);
}
}
return (0);
}
/*
* Check errlist and report any errors, omitting ones which should be
* suppressed. Returns B_TRUE if any errors were reported.
*/
static boolean_t
check_trim_errs(zpool_handle_t *zhp, trimflags_t *trim_flags,
nvlist_t *guids_to_paths, nvlist_t *vds, nvlist_t *errlist)
{
nvpair_t *elem;
boolean_t reported_errs = B_FALSE;
int num_vds = 0;
int num_suppressed_errs = 0;
for (elem = nvlist_next_nvpair(vds, NULL);
elem != NULL; elem = nvlist_next_nvpair(vds, elem)) {
num_vds++;
}
for (elem = nvlist_next_nvpair(errlist, NULL);
elem != NULL; elem = nvlist_next_nvpair(errlist, elem)) {
int64_t vd_error = xlate_trim_err(fnvpair_value_int64(elem));
char *path;
/*
* If only the pool was specified and it was not a secure
* trim, then suppress warnings for individual vdevs which
* do not support trimming.
*/
if (vd_error == EZFS_TRIM_NOTSUP &&
trim_flags->fullpool &&
!trim_flags->secure) {
num_suppressed_errs++;
continue;
}
reported_errs = B_TRUE;
if (nvlist_lookup_string(guids_to_paths, nvpair_name(elem),
&path) != 0)
path = nvpair_name(elem);
(void) zfs_error_fmt(zhp->zpool_hdl, vd_error,
"cannot trim '%s'", path);
}
if (num_suppressed_errs == num_vds) {
(void) zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"no devices in pool support trim operations"));
(void) (zfs_error(zhp->zpool_hdl, EZFS_TRIM_NOTSUP,
dgettext(TEXT_DOMAIN, "cannot trim")));
reported_errs = B_TRUE;
}
return (reported_errs);
}
/*
* Begin, suspend, or cancel the TRIM (discarding of all free blocks) for
* the given vdevs in the given pool.
*/
int
zpool_trim(zpool_handle_t *zhp, pool_trim_func_t cmd_type, nvlist_t *vds,
trimflags_t *trim_flags)
{
int err;
int retval = 0;
nvlist_t *vdev_guids = fnvlist_alloc();
nvlist_t *guids_to_paths = fnvlist_alloc();
nvlist_t *errlist = NULL;
err = zpool_translate_vdev_guids(zhp, vds, vdev_guids,
guids_to_paths, &errlist);
if (err != 0) {
check_trim_errs(zhp, trim_flags, guids_to_paths, vds, errlist);
retval = -1;
goto out;
}
err = lzc_trim(zhp->zpool_name, cmd_type, trim_flags->rate,
trim_flags->secure, vdev_guids, &errlist);
if (err != 0) {
nvlist_t *vd_errlist;
if (errlist != NULL && nvlist_lookup_nvlist(errlist,
ZPOOL_TRIM_VDEVS, &vd_errlist) == 0) {
if (check_trim_errs(zhp, trim_flags, guids_to_paths,
vds, vd_errlist)) {
retval = -1;
goto out;
}
} else {
char msg[1024];
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "operation failed"));
zpool_standard_error(zhp->zpool_hdl, err, msg);
retval = -1;
goto out;
}
}
if (trim_flags->wait)
retval = zpool_trim_wait(zhp, vdev_guids);
out:
if (errlist != NULL)
fnvlist_free(errlist);
fnvlist_free(vdev_guids);
fnvlist_free(guids_to_paths);
return (retval);
}
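/*
 * Usage sketch for zpool_trim(): the trimflags_t members used above
 * (fullpool, secure, wait, rate) select how errors are reported and
 * whether the call blocks until the TRIM completes.
 */
static int
example_trim_pool(zpool_handle_t *zhp, nvlist_t *vds)
{
	trimflags_t flags = {
		.fullpool = B_TRUE,	/* whole pool; per-vdev ENOTSUP is suppressed */
		.secure = B_FALSE,
		.wait = B_FALSE,
		.rate = 0		/* no rate limit */
	};

	return (zpool_trim(zhp, POOL_TRIM_START, vds, &flags));
}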
/*
* Scan the pool.
*/
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
int err;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = func;
zc.zc_flags = cmd;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
return (0);
err = errno;
/* ECANCELED on a scrub means we resumed a paused scrub */
if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
cmd == POOL_SCRUB_NORMAL)
return (0);
if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
return (0);
if (func == POOL_SCAN_SCRUB) {
if (cmd == POOL_SCRUB_PAUSE) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot pause scrubbing %s"), zc.zc_name);
} else {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot scrub %s"), zc.zc_name);
}
} else if (func == POOL_SCAN_RESILVER) {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot restart resilver on %s"), zc.zc_name);
} else if (func == POOL_SCAN_NONE) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
zc.zc_name);
} else {
assert(!"unexpected result");
}
if (err == EBUSY) {
nvlist_t *nvroot;
pool_scan_stat_t *ps = NULL;
uint_t psc;
verify(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
if (ps && ps->pss_func == POOL_SCAN_SCRUB &&
ps->pss_state == DSS_SCANNING) {
if (cmd == POOL_SCRUB_PAUSE)
return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
else
return (zfs_error(hdl, EZFS_SCRUBBING, msg));
} else {
return (zfs_error(hdl, EZFS_RESILVERING, msg));
}
} else if (err == ENOENT) {
return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
} else if (err == ENOTSUP && func == POOL_SCAN_RESILVER) {
return (zfs_error(hdl, EZFS_NO_RESILVER_DEFER, msg));
} else {
return (zpool_standard_error(hdl, err, msg));
}
}
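/*
 * Usage sketch for zpool_scan(): starting and pausing a scrub map onto the
 * func/cmd pairs handled above.
 */
static int
example_scrub(zpool_handle_t *zhp, boolean_t pause)
{
	if (pause)
		return (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE));
	return (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL));
}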
/*
* Find a vdev that matches the search criteria specified. We use the
* nvpair name to determine how we should look for the device.
* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
* spare, but FALSE if it's an INUSE spare.
*/
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
uint_t c, children;
nvlist_t **child;
nvlist_t *ret;
uint64_t is_log;
char *srchkey;
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
/* Nothing to look for */
if (search == NULL || pair == NULL)
return (NULL);
/* Obtain the key we will use to search */
srchkey = nvpair_name(pair);
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
uint64_t srchval, theguid;
verify(nvpair_value_uint64(pair, &srchval) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&theguid) == 0);
if (theguid == srchval)
return (nv);
}
break;
case DATA_TYPE_STRING: {
char *srchval, *val;
verify(nvpair_value_string(pair, &srchval) == 0);
if (nvlist_lookup_string(nv, srchkey, &val) != 0)
break;
/*
* Search for the requested value. Special cases:
*
* - ZPOOL_CONFIG_PATH for whole disk entries. These end in
* "-part1", or "p1". The suffix is hidden from the user,
* but included in the string, so this matches around it.
* - ZPOOL_CONFIG_PATH for short names; zfs_strcmp_shortname()
* is used to check all possible expanded paths.
* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
*
* All other searches are simple string compares.
*/
if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
return (nv);
} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
char *type, *idx, *end, *p;
uint64_t id, vdev_id;
/*
* Determine our vdev type, keeping in mind
* that the srchval is composed of a type and
* vdev id pair (e.g. mirror-4).
*/
if ((type = strdup(srchval)) == NULL)
return (NULL);
if ((p = strrchr(type, '-')) == NULL) {
free(type);
break;
}
idx = p + 1;
*p = '\0';
/*
* If the types don't match then keep looking.
*/
if (strncmp(val, type, strlen(val)) != 0) {
free(type);
break;
}
verify(zpool_vdev_is_interior(type));
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
&id) == 0);
errno = 0;
vdev_id = strtoull(idx, &end, 10);
/*
* If we are looking for a raidz and a parity is
* specified, make sure it matches.
*/
int rzlen = strlen(VDEV_TYPE_RAIDZ);
assert(rzlen == strlen(VDEV_TYPE_DRAID));
int typlen = strlen(type);
if ((strncmp(type, VDEV_TYPE_RAIDZ, rzlen) == 0 ||
strncmp(type, VDEV_TYPE_DRAID, rzlen) == 0) &&
typlen != rzlen) {
uint64_t vdev_parity;
int parity = *(type + rzlen) - '0';
if (parity <= 0 || parity > 3 ||
(typlen - rzlen) != 1) {
/*
* Nonsense parity specified, can
* never match
*/
free(type);
return (NULL);
}
verify(nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY, &vdev_parity) == 0);
if ((int)vdev_parity != parity) {
free(type);
break;
}
}
free(type);
if (errno != 0)
return (NULL);
/*
* Now verify that we have the correct vdev id.
*/
if (vdev_id == id)
return (nv);
}
/*
* Common case
*/
if (strcmp(srchval, val) == 0)
return (nv);
break;
}
default:
break;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
/*
* The 'is_log' value is only set for the toplevel
* vdev, not the leaf vdevs, so we always look up the
* log device from the root of the vdev tree (where
* 'log' is non-NULL).
*/
if (log != NULL &&
nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
is_log) {
*log = B_TRUE;
}
return (ret);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*avail_spare = B_TRUE;
return (ret);
}
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*l2cache = B_TRUE;
return (ret);
}
}
}
return (NULL);
}
/*
* Given a physical path or guid, find the associated vdev.
*/
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
nvlist_t *search, *nvroot, *ret;
uint64_t guid;
char *end;
verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
guid = strtoull(ppath, &end, 0);
if (guid != 0 && *end == '\0') {
verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
} else {
verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH,
ppath) == 0);
}
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
nvlist_free(search);
return (ret);
}
/*
* Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
*/
static boolean_t
zpool_vdev_is_interior(const char *name)
{
if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
strncmp(name,
VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
return (B_TRUE);
if (strncmp(name, VDEV_TYPE_DRAID, strlen(VDEV_TYPE_DRAID)) == 0 &&
!zpool_is_draid_spare(name))
return (B_TRUE);
return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
char *end;
nvlist_t *nvroot, *search, *ret;
uint64_t guid;
verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
guid = strtoull(path, &end, 0);
if (guid != 0 && *end == '\0') {
verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
} else if (zpool_vdev_is_interior(path)) {
verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
} else {
verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
}
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
nvlist_free(search);
return (ret);
}
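/*
 * Return 1 if the vdev's config carries none of the offline, faulted, or
 * removed markers, and 0 otherwise.
 */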
static int
vdev_is_online(nvlist_t *nv)
{
uint64_t ival;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
return (0);
return (1);
}
/*
* Helper function for zpool_get_physpaths().
*/
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
size_t *bytes_written)
{
size_t bytes_left, pos, rsz;
char *tmppath;
const char *format;
if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
&tmppath) != 0)
return (EZFS_NODEVICE);
pos = *bytes_written;
bytes_left = physpath_size - pos;
format = (pos == 0) ? "%s" : " %s";
rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
*bytes_written += rsz;
if (rsz >= bytes_left) {
/* if physpath was not copied properly, clear it */
if (bytes_left != 0) {
physpath[pos] = 0;
}
return (EZFS_NOSPC);
}
return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
size_t *rsz, boolean_t is_spare)
{
char *type;
int ret;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (EZFS_INVALCONFIG);
if (strcmp(type, VDEV_TYPE_DISK) == 0) {
/*
* An active spare device has ZPOOL_CONFIG_IS_SPARE set.
* For a spare vdev, we only want to boot from the active
* spare device.
*/
if (is_spare) {
uint64_t spare = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare);
if (!spare)
return (EZFS_INVALCONFIG);
}
if (vdev_is_online(nv)) {
if ((ret = vdev_get_one_physpath(nv, physpath,
phypath_size, rsz)) != 0)
return (ret);
}
} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
(is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
nvlist_t **child;
uint_t count;
int i, ret;
if (nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
return (EZFS_INVALCONFIG);
for (i = 0; i < count; i++) {
ret = vdev_get_physpaths(child[i], physpath,
phypath_size, rsz, is_spare);
if (ret == EZFS_NOSPC)
return (ret);
}
}
return (EZFS_POOL_INVALARG);
}
/*
* Get phys_path for a root pool config.
* Return 0 on success; non-zero on failure.
*/
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
size_t rsz;
nvlist_t *vdev_root;
nvlist_t **child;
uint_t count;
char *type;
rsz = 0;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&vdev_root) != 0)
return (EZFS_INVALCONFIG);
if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
&child, &count) != 0)
return (EZFS_INVALCONFIG);
/*
* A root pool can only have a single top-level vdev.
*/
if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
return (EZFS_POOL_INVALARG);
(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
B_FALSE);
/* No online devices */
if (rsz == 0)
return (EZFS_NODEVICE);
return (0);
}
/*
* Get phys_path for a root pool.
* Return 0 on success; non-zero on failure.
*/
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
return (zpool_get_config_physpath(zhp->zpool_config, physpath,
phypath_size));
}
/*
* Convert a vdev path to a GUID. Returns GUID or 0 on error.
*
* If is_spare, is_l2cache, or is_log is non-NULL, store within it whether
* the vdev is a spare, l2cache, or log device; NULL pointers are simply
* ignored.
*/
static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
uint64_t guid;
boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
nvlist_t *tgt;
if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
&log)) == NULL)
return (0);
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
if (is_spare != NULL)
*is_spare = spare;
if (is_l2cache != NULL)
*is_l2cache = l2cache;
if (is_log != NULL)
*is_log = log;
return (guid);
}
/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
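/*
 * Usage sketch: resolve a vdev name to its GUID and fault it with
 * zpool_vdev_fault() below, using VDEV_AUX_EXTERNAL to mark the fault as
 * externally requested.
 */
static int
example_fault_by_path(zpool_handle_t *zhp, const char *path)
{
	uint64_t guid = zpool_vdev_path_to_guid(zhp, path);

	if (guid == 0)
		return (-1);
	return (zpool_vdev_fault(zhp, guid, VDEV_AUX_EXTERNAL));
}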
/*
* Bring the specified vdev online. The 'flags' parameter is a set of the
* ZFS_ONLINE_* flags.
*/
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
vdev_state_t *newstate)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
char *pathname;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
int error;
if (flags & ZFS_ONLINE_EXPAND) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
} else {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
if ((flags & ZFS_ONLINE_EXPAND ||
zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
/*
* XXX - L2ARC 1.0 devices can't support expansion.
*/
if (l2cache) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot expand cache devices"));
return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
}
if (wholedisk) {
const char *fullpath = path;
char buf[MAXPATHLEN];
if (path[0] != '/') {
error = zfs_resolve_shortname(path, buf,
sizeof (buf));
if (error != 0)
return (zfs_error(hdl, EZFS_NODEVICE,
msg));
fullpath = buf;
}
error = zpool_relabel_disk(hdl, fullpath, msg);
if (error != 0)
return (error);
}
}
zc.zc_cookie = VDEV_STATE_ONLINE;
zc.zc_obj = flags;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
if (errno == EINVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
"from this pool into a new one. Use '%s' "
"instead"), "zpool detach");
return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
}
return (zpool_standard_error(hdl, errno, msg));
}
*newstate = zc.zc_cookie;
return (0);
}
/*
* Take the specified vdev offline
*/
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
zc.zc_cookie = VDEV_STATE_OFFLINE;
zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
case EEXIST:
/*
* The log device has unplayed logs
*/
return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
default:
return (zpool_standard_error(hdl, errno, msg));
}
}
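/*
 * Usage sketch: temporarily offline a device and bring it back online with
 * the two calls above; no ZFS_ONLINE_* flags are passed here.
 */
static int
example_bounce_device(zpool_handle_t *zhp, const char *path)
{
	vdev_state_t newstate;

	if (zpool_vdev_offline(zhp, path, B_TRUE) != 0)
		return (-1);
	return (zpool_vdev_online(zhp, path, 0, &newstate));
}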
/*
* Mark the given vdev faulted.
*/
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_FAULTED;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
default:
return (zpool_standard_error(hdl, errno, msg));
}
}
/*
* Mark the given vdev degraded.
*/
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_DEGRADED;
zc.zc_obj = aux;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Returns TRUE if the given nvlist is a vdev that was originally swapped in as
* a hot spare.
*/
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
nvlist_t **child;
uint_t c, children;
char *type;
if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
&type) == 0);
if ((strcmp(type, VDEV_TYPE_SPARE) == 0 ||
strcmp(type, VDEV_TYPE_DRAID_SPARE) == 0) &&
children == 2 && child[which] == tgt)
return (B_TRUE);
for (c = 0; c < children; c++)
if (is_replacing_spare(child[c], tgt, which))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Attach new_disk (fully described by nvroot) to old_disk.
* If 'replacing' is specified, the new disk will replace the old one.
*/
int
zpool_vdev_attach(zpool_handle_t *zhp, const char *old_disk,
const char *new_disk, nvlist_t *nvroot, int replacing, boolean_t rebuild)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
int ret;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
uint64_t val;
char *newname;
nvlist_t **child;
uint_t children;
nvlist_t *config_root;
libzfs_handle_t *hdl = zhp->zpool_hdl;
if (replacing)
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot replace %s with %s"), old_disk, new_disk);
else
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot attach %s to %s"), new_disk, old_disk);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
zc.zc_cookie = replacing;
zc.zc_simple = rebuild;
if (rebuild &&
zfeature_lookup_guid("org.openzfs:device_rebuild", NULL) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"the loaded zfs module doesn't support device rebuilds"));
return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children != 1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
}
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
return (-1);
/*
* If the target is a hot spare that has been swapped in, we can only
* replace it with another hot spare.
*/
if (replacing &&
nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
(zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
NULL) == NULL || !avail_spare) &&
is_replacing_spare(config_root, tgt, 1)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only be replaced by another hot spare"));
free(newname);
return (zfs_error(hdl, EZFS_BADTARGET, msg));
}
free(newname);
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
zcmd_free_nvlists(&zc);
if (ret == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't attach to or replace this type of vdev.
*/
if (replacing) {
uint64_t version = zpool_get_prop_int(zhp,
ZPOOL_PROP_VERSION, NULL);
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a log with a spare"));
} else if (rebuild) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"only mirror and dRAID vdevs support "
"sequential reconstruction"));
} else if (zpool_is_draid_spare(new_disk)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares can only replace child "
"devices in their parent's dRAID vdev"));
} else if (version >= SPA_VERSION_MULTI_REPLACE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"already in replacing/spare config; wait "
"for completion or use 'zpool detach'"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a replacing device"));
}
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only attach to mirrors and top-level "
"disks"));
}
(void) zfs_error(hdl, EZFS_BADTARGET, msg);
break;
case EINVAL:
/*
* The new device must be a single disk.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
"or device removal is in progress"),
new_disk);
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case EOVERFLOW:
/*
* The new device is too small.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is too small"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case EDOM:
/*
* The new device has a different optimal sector size.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device has a different optimal sector size; use the "
"option '-o ashift=N' to override the optimal size"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case ENAMETOOLONG:
/*
* The resulting top-level vdev spec won't fit in the label.
*/
(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
break;
default:
(void) zpool_standard_error(hdl, errno, msg);
}
return (-1);
}
/*
* Detach the specified device.
*/
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't detach from this type of vdev.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
"applicable to mirror and replacing vdevs"));
(void) zfs_error(hdl, EZFS_BADTARGET, msg);
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
break;
default:
(void) zpool_standard_error(hdl, errno, msg);
}
return (-1);
}
/*
* Find a mirror vdev in the source nvlist.
*
* The mchild array contains a list of disks in one of the top-level mirrors
* of the source pool. The schild array contains a list of disks that the
* user specified on the command line. We loop over the mchild array to
* see if any entry in the schild array matches.
*
* If a disk in the mchild array is found in the schild array, we return
* the index of that entry. Otherwise we return -1.
*/
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
nvlist_t **schild, uint_t schildren)
{
uint_t mc;
for (mc = 0; mc < mchildren; mc++) {
uint_t sc;
char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
mchild[mc], 0);
for (sc = 0; sc < schildren; sc++) {
char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
schild[sc], 0);
boolean_t result = (strcmp(mpath, spath) == 0);
free(spath);
if (result) {
free(mpath);
return (mc);
}
}
free(mpath);
}
return (-1);
}
/*
* Split a mirror pool. If '*newroot' is NULL, then a new nvlist
* is generated and it is the responsibility of the caller to free it.
*/
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
nvlist_t *props, splitflags_t flags)
{
zfs_cmd_t zc = {"\0"};
char msg[1024], *bias;
nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
nvlist_t **varray = NULL, *zc_props = NULL;
uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t vers, readonly = B_FALSE;
boolean_t freelist = B_FALSE, memory_err = B_TRUE;
int retval = 0;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("Internal error: unable to "
"retrieve pool configuration\n"));
return (-1);
}
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
== 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
if (props) {
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
props, vers, flags, msg)) == NULL)
return (-1);
(void) nvlist_lookup_uint64(zc_props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property %s can only be set at import time"),
zpool_prop_to_name(ZPOOL_PROP_READONLY));
return (-1);
}
}
if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool is missing vdev tree"));
nvlist_free(zc_props);
return (-1);
}
varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
vcount = 0;
if (*newroot == NULL ||
nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
&newchild, &newchildren) != 0)
newchildren = 0;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
boolean_t is_special = B_FALSE, is_dedup = B_FALSE;
char *type;
nvlist_t **mchild, *vdev;
uint_t mchildren;
int entry;
/*
* Unlike cache & spares, slogs are stored in the
* ZPOOL_CONFIG_CHILDREN array. We filter them out here.
*/
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_log || is_hole) {
/*
* Create a hole vdev and put it in the config.
*/
if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0)
goto out;
if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
1) != 0)
goto out;
if (lastlog == 0)
lastlog = vcount;
varray[vcount++] = vdev;
continue;
}
lastlog = 0;
verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
== 0);
if (strcmp(type, VDEV_TYPE_INDIRECT) == 0) {
vdev = child[c];
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
continue;
} else if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool must be composed only of mirrors\n"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
goto out;
}
if (nvlist_lookup_string(child[c],
ZPOOL_CONFIG_ALLOCATION_BIAS, &bias) == 0) {
if (strcmp(bias, VDEV_ALLOC_BIAS_SPECIAL) == 0)
is_special = B_TRUE;
else if (strcmp(bias, VDEV_ALLOC_BIAS_DEDUP) == 0)
is_dedup = B_TRUE;
}
verify(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
/* find or add an entry for this top-level vdev */
if (newchildren > 0 &&
(entry = find_vdev_entry(zhp, mchild, mchildren,
newchild, newchildren)) >= 0) {
/* We found a disk that the user specified. */
vdev = mchild[entry];
++found;
} else {
/* User didn't specify a disk for this vdev. */
vdev = mchild[mchildren - 1];
}
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
if (flags.dryrun != 0) {
if (is_dedup == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_DEDUP) != 0)
goto out;
} else if (is_special == B_TRUE) {
if (nvlist_add_string(varray[vcount - 1],
ZPOOL_CONFIG_ALLOCATION_BIAS,
VDEV_ALLOC_BIAS_SPECIAL) != 0)
goto out;
}
}
}
/* did we find every disk the user specified? */
if (found != newchildren) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
"include at most one disk from each mirror"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
goto out;
}
/* Prepare the nvlist for populating. */
if (*newroot == NULL) {
if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
goto out;
freelist = B_TRUE;
if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0)
goto out;
} else {
verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
}
/* Add all the children we found */
if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
lastlog == 0 ? vcount : lastlog) != 0)
goto out;
/*
* If we're just doing a dry run, exit now with success.
*/
if (flags.dryrun) {
memory_err = B_FALSE;
freelist = B_FALSE;
goto out;
}
/* now build up the config list & call the ioctl */
if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_nvlist(newconfig,
ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
nvlist_add_string(newconfig,
ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
goto out;
/*
* The new pool is automatically part of the namespace unless we
* explicitly export it.
*/
if (!flags.import)
zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
goto out;
if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
goto out;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
retval = zpool_standard_error(hdl, errno, msg);
goto out;
}
freelist = B_FALSE;
memory_err = B_FALSE;
out:
if (varray != NULL) {
int v;
for (v = 0; v < vcount; v++)
nvlist_free(varray[v]);
free(varray);
}
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(newconfig);
if (freelist) {
nvlist_free(*newroot);
*newroot = NULL;
}
if (retval != 0)
return (retval);
if (memory_err)
return (no_memory(hdl));
return (0);
}
/*
* Remove the given device.
*/
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t version;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
if (zpool_is_draid_spare(path)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dRAID spares cannot be removed"));
return (zfs_error(hdl, EZFS_NODEVICE, msg));
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (islog && version < SPA_VERSION_HOLES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support log removal"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
}
zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
switch (errno) {
case EINVAL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid config; all top-level vdevs must "
"have the same sector size and not be raidz."));
(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
break;
case EBUSY:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Pool busy; removal may already be in progress"));
}
(void) zfs_error(hdl, EZFS_BUSY, msg);
break;
case EACCES:
if (islog) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Mount encrypted datasets to replay logs."));
(void) zfs_error(hdl, EZFS_BUSY, msg);
} else {
(void) zpool_standard_error(hdl, errno, msg);
}
break;
default:
(void) zpool_standard_error(hdl, errno, msg);
}
return (-1);
}
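/*
 * Illustrative sketch (not part of the original source): how a caller
 * might start a top-level vdev removal with zpool_vdev_remove() and
 * later abandon it with zpool_vdev_remove_cancel().  The device path
 * used here is purely hypothetical.
 */
#if 0
static void
example_remove_and_cancel(zpool_handle_t *zhp)
{
	/* Begin evacuating the named device; the kernel proceeds asynchronously. */
	if (zpool_vdev_remove(zhp, "/dev/sdb") != 0)
		return;		/* libzfs has already reported the reason */

	/* ... later, if the removal should be abandoned ... */
	(void) zpool_vdev_remove_cancel(zhp);
}
#endif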
int
zpool_vdev_remove_cancel(zpool_handle_t *zhp)
{
zfs_cmd_t zc;
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot cancel removal"));
bzero(&zc, sizeof (zc));
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = 1;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
int
zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
uint64_t *sizep)
{
char msg[1024];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
path);
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
if (avail_spare || l2cache || islog) {
*sizep = 0;
return (0);
}
if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"indirect size not available"));
return (zfs_error(hdl, EINVAL, msg));
}
return (0);
}
/*
* Clear the errors for the pool, or the particular device if specified.
*/
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
zpool_load_policy_t policy;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *nvi = NULL;
int error;
if (path)
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
path);
else
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (path) {
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
&l2cache, NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
/*
* Don't allow error clearing for hot spares. Do allow
* error clearing for l2cache devices.
*/
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
&zc.zc_guid) == 0);
}
zpool_get_load_policy(rewindnvl, &policy);
zc.zc_cookie = policy.zlp_rewind;
if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
return (-1);
if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
return (-1);
while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (!error || ((policy.zlp_rewind & ZPOOL_TRY_REWIND) &&
errno != EPERM && errno != EACCES)) {
if (policy.zlp_rewind &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
zpool_rewind_exclaim(hdl, zc.zc_name,
((policy.zlp_rewind & ZPOOL_TRY_REWIND) != 0),
nvi);
nvlist_free(nvi);
}
zcmd_free_nvlists(&zc);
return (0);
}
zcmd_free_nvlists(&zc);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Similar to zpool_clear(), but takes a GUID (used by fmd).
*/
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
(u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = ZPOOL_NO_REWIND;
if (zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Change the GUID for a pool.
*/
int
zpool_reguid(zpool_handle_t *zhp)
{
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
zfs_cmd_t zc = {"\0"};
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Reopen the pool.
*/
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *scrub_restart = data;
int error;
error = lzc_reopen(pool_name, *scrub_restart);
if (error) {
return (zpool_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
}
return (0);
}
/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
int ret;
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *force = data;
nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_boolean_value(innvl, "force", *force);
if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
nvlist_free(innvl);
return (zpool_standard_error_fmt(hdl, ret,
dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
}
nvlist_free(innvl);
return (0);
}
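/*
 * Illustrative sketch (assumption: these callbacks are applied per pool
 * with zpool_iter() from libzfs.h, as the zpool command does): force a
 * sync of every imported pool.
 */
#if 0
static void
example_sync_all_pools(libzfs_handle_t *hdl)
{
	boolean_t force = B_TRUE;

	/* zpool_sync_one() is invoked once per pool, with &force as data. */
	(void) zpool_iter(hdl, zpool_sync_one, &force);
}
#endif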
#define PATH_BUF_LEN 64
/*
* Given a vdev, return the name to display in iostat. If the vdev has a path,
* we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
* We also check if this is a whole disk, in which case we strip off the
* trailing 's0' slice name.
*
* This routine is also responsible for identifying when disks have been
* reconfigured in a new location. The kernel will have opened the device by
* devid, but the path will still refer to the old location. To catch this, we
* first do a path -> devid translation (which is fast for the common case). If
* the devid matches, we're done. If not, we do a reverse devid -> path
* translation and issue the appropriate ioctl() to update the path of the vdev.
* If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
* of these checks.
*/
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
int name_flags)
{
char *path, *type, *env;
uint64_t value;
char buf[PATH_BUF_LEN];
char tmpbuf[PATH_BUF_LEN];
/*
* vdev_name will be "root"/"root-0" for the root vdev, but it is the
* zpool name that will be displayed to the user.
*/
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (zhp != NULL && strcmp(type, "root") == 0)
return (zfs_strdup(hdl, zpool_get_name(zhp)));
env = getenv("ZPOOL_VDEV_NAME_PATH");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_PATH;
env = getenv("ZPOOL_VDEV_NAME_GUID");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_GUID;
env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
name_flags & VDEV_NAME_GUID) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
path = buf;
} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
char *rp = realpath(path, NULL);
if (rp) {
strlcpy(buf, rp, sizeof (buf));
path = buf;
free(rp);
}
}
/*
* For a block device only use the name.
*/
if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
!(name_flags & VDEV_NAME_PATH)) {
path = zfs_strip_path(path);
}
/*
* Remove the partition from the path if this is a whole disk.
*/
if (strcmp(type, VDEV_TYPE_DRAID_SPARE) != 0 &&
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
== 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (zfs_strip_partition(path));
}
} else {
path = type;
/*
* If it's a raidz device, we need to stick in the parity level.
*/
if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&value) == 0);
(void) snprintf(buf, sizeof (buf), "%s%llu", path,
(u_longlong_t)value);
path = buf;
}
/*
* If it's a dRAID device, we add parity, groups, and spares.
*/
if (strcmp(path, VDEV_TYPE_DRAID) == 0) {
uint64_t ndata, nparity, nspares;
nvlist_t **child;
uint_t children;
verify(nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &children) == 0);
verify(nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_NPARITY, &nparity) == 0);
verify(nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NDATA, &ndata) == 0);
verify(nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_DRAID_NSPARES, &nspares) == 0);
path = zpool_draid_name(buf, sizeof (buf), ndata,
nparity, nspares, children);
}
/*
* We identify each top-level vdev by using a <type-id>
* naming convention.
*/
if (name_flags & VDEV_NAME_TYPE_ID) {
uint64_t id;
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
&id) == 0);
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
path, (u_longlong_t)id);
path = tmpbuf;
}
}
return (zfs_strdup(hdl, path));
}
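/*
 * Illustrative sketch (not part of the original source): format a vdev
 * name for display.  'nv' is assumed to be a vdev config taken from the
 * pool's ZPOOL_CONFIG_VDEV_TREE; the returned string is allocated with
 * zfs_strdup() and must be freed by the caller.
 */
#if 0
static void
example_print_vdev(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	char *name = zpool_vdev_name(hdl, zhp, nv, VDEV_NAME_TYPE_ID);

	(void) printf("%s\n", name);
	free(name);
}
#endif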
static int
zbookmark_mem_compare(const void *a, const void *b)
{
return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
* Retrieve the persistent error log, uniquify the members, and return to the
* caller.
*/
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t count;
zbookmark_phys_t *zb = NULL;
int i;
/*
* Retrieve the raw error list from the kernel. If the number of errors
* has increased, allocate more space and continue until we get the
* entire list.
*/
verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
&count) == 0);
if (count == 0)
return (0);
zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
count * sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst_size = count;
(void) strcpy(zc.zc_name, zhp->zpool_name);
for (;;) {
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_ERROR_LOG,
&zc) != 0) {
free((void *)(uintptr_t)zc.zc_nvlist_dst);
if (errno == ENOMEM) {
void *dst;
count = zc.zc_nvlist_dst_size;
dst = zfs_alloc(zhp->zpool_hdl, count *
sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst = (uintptr_t)dst;
} else {
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "errors: List of "
"errors unavailable")));
}
} else {
break;
}
}
/*
* Sort the resulting bookmarks. This is a little confusing due to the
* implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
* to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
* _not_ copied as part of the process. So we point the start of our
* array appropriately and decrement the total number of elements.
*/
zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
zc.zc_nvlist_dst_size;
count -= zc.zc_nvlist_dst_size;
qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
/*
* Fill in the nverrlistp with nvlist's of dataset and object numbers.
*/
for (i = 0; i < count; i++) {
nvlist_t *nv;
/* ignoring zb_blkid and zb_level for now */
if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
zb[i-1].zb_object == zb[i].zb_object)
continue;
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
goto nomem;
if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
zb[i].zb_objset) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
zb[i].zb_object) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
nvlist_free(nv);
goto nomem;
}
nvlist_free(nv);
}
free((void *)(uintptr_t)zc.zc_nvlist_dst);
return (0);
nomem:
free((void *)(uintptr_t)zc.zc_nvlist_dst);
return (no_memory(zhp->zpool_hdl));
}
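/*
 * Illustrative sketch (not part of the original source): walk the
 * uniquified error list built above.  Each entry is an nvlist holding
 * the dataset and object numbers under ZPOOL_ERR_DATASET and
 * ZPOOL_ERR_OBJECT.
 */
#if 0
static void
example_list_errors(zpool_handle_t *zhp)
{
	nvlist_t *nverrlist = NULL;
	nvpair_t *elem = NULL;

	/* nverrlist stays NULL when the pool has no logged errors */
	if (zpool_get_errlog(zhp, &nverrlist) != 0 || nverrlist == NULL)
		return;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
		nvlist_t *nv = fnvpair_value_nvlist(elem);
		uint64_t dsobj = fnvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET);
		uint64_t obj = fnvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT);

		(void) printf("<0x%llx>:<0x%llx>\n",
		    (u_longlong_t)dsobj, (u_longlong_t)obj);
	}
	nvlist_free(nverrlist);
}
#endif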
/*
* Upgrade a ZFS pool to the latest on-disk version.
*/
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strcpy(zc.zc_name, zhp->zpool_name);
zc.zc_cookie = new_version;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
zhp->zpool_name));
return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
int i;
(void) strlcpy(string, zfs_basename(argv[0]), len);
for (i = 1; i < argc; i++) {
(void) strlcat(string, " ", len);
(void) strlcat(string, argv[i], len);
}
}
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *args;
int err;
args = fnvlist_alloc();
fnvlist_add_string(args, "message", message);
err = zcmd_write_src_nvlist(hdl, &zc, args);
if (err == 0)
err = zfs_ioctl(hdl, ZFS_IOC_LOG_HISTORY, &zc);
nvlist_free(args);
zcmd_free_nvlists(&zc);
return (err);
}
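/*
 * Illustrative sketch (this pairing reflects how the command-line tools
 * are expected to use the two helpers above, though the exact call site
 * is an assumption): record the invoking command line in the pool
 * history.  The buffer size is hypothetical.
 */
#if 0
static void
example_log_invocation(libzfs_handle_t *hdl, int argc, char **argv)
{
	char history[4096];

	zfs_save_arguments(argc, argv, history, sizeof (history));
	(void) zpool_log_history(hdl, history);
}
#endif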
/*
* Perform ioctl to get some command history of a pool.
*
* 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
* logical offset of the history buffer to start reading from.
*
* Upon return, 'off' is the next logical offset to read from and
* 'len' is the actual number of bytes read into 'buf'.
*/
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)buf;
zc.zc_history_len = *len;
zc.zc_history_offset = *off;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
switch (errno) {
case EPERM:
return (zfs_error_fmt(hdl, EZFS_PERM,
dgettext(TEXT_DOMAIN,
"cannot show history for pool '%s'"),
zhp->zpool_name));
case ENOENT:
return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s'"), zhp->zpool_name));
case ENOTSUP:
return (zfs_error_fmt(hdl, EZFS_BADVERSION,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s', pool must be upgraded"), zhp->zpool_name));
default:
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name));
}
}
*len = zc.zc_history_len;
*off = zc.zc_history_offset;
return (0);
}
/*
* Retrieve the command history of a pool.
*/
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp, uint64_t *off,
boolean_t *eof)
{
char *buf;
int buflen = 128 * 1024;
nvlist_t **records = NULL;
uint_t numrecords = 0;
int err, i;
uint64_t start = *off;
buf = malloc(buflen);
if (buf == NULL)
return (ENOMEM);
/* process about 1MB at a time */
while (*off - start < 1024 * 1024) {
uint64_t bytes_read = buflen;
uint64_t leftover;
if ((err = get_history(zhp, buf, off, &bytes_read)) != 0)
break;
/* if nothing else was read in, we're at EOF, just return */
if (!bytes_read) {
*eof = B_TRUE;
break;
}
if ((err = zpool_history_unpack(buf, bytes_read,
&leftover, &records, &numrecords)) != 0)
break;
*off -= leftover;
if (leftover == bytes_read) {
/*
* no progress made, because buffer is not big enough
* to hold this record; resize and retry.
*/
buflen *= 2;
free(buf);
buf = malloc(buflen);
if (buf == NULL)
return (ENOMEM);
}
}
free(buf);
if (!err) {
verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
records, numrecords) == 0);
}
for (i = 0; i < numrecords; i++)
nvlist_free(records[i]);
free(records);
return (err);
}
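/*
 * Illustrative sketch (not part of the original source): read the full
 * command history by calling zpool_get_history() until it reports EOF.
 * Each returned nvlist carries an array of records under
 * ZPOOL_HIST_RECORD.
 */
#if 0
static void
example_dump_history(zpool_handle_t *zhp)
{
	uint64_t off = 0;
	boolean_t eof = B_FALSE;

	while (!eof) {
		nvlist_t *nvhis = NULL;
		nvlist_t **records;
		uint_t numrecords;

		if (zpool_get_history(zhp, &nvhis, &off, &eof) != 0)
			break;
		if (nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
		    &records, &numrecords) == 0) {
			/* inspect records[0 .. numrecords - 1] here */
		}
		nvlist_free(nvhis);
	}
}
#endif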
/*
* Retrieve the next event given the passed 'zevent_fd' file descriptor.
* If there is a new event available 'nvp' will contain a newly allocated
* nvlist and 'dropped' will be set to the number of missed events since
* the last call to this function. When 'nvp' is set to NULL it indicates
* no new events are available. In either case the function returns 0 and
* it is up to the caller to free 'nvp'. In the case of a fatal error the
* function will return a non-zero value. When the function is called in
* blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
* it will not return until a new event is available.
*/
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
int *dropped, unsigned flags, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
*nvp = NULL;
*dropped = 0;
zc.zc_cleanup_fd = zevent_fd;
if (flags & ZEVENT_NONBLOCK)
zc.zc_guid = ZEVENT_NONBLOCK;
if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
return (-1);
retry:
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
switch (errno) {
case ESHUTDOWN:
error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "zfs shutdown"));
goto out;
case ENOENT:
/* Blocking error case should not occur */
if (!(flags & ZEVENT_NONBLOCK))
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
case ENOMEM:
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
error = zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
} else {
goto retry;
}
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
}
}
error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
if (error != 0)
goto out;
*dropped = (int)zc.zc_cookie;
out:
zcmd_free_nvlists(&zc);
return (error);
}
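/*
 * Illustrative sketch (not part of the original source): drain all
 * currently queued events without blocking.  'zevent_fd' is assumed to
 * be a descriptor the caller opened on the ZFS control device.
 */
#if 0
static void
example_drain_events(libzfs_handle_t *hdl, int zevent_fd)
{
	for (;;) {
		nvlist_t *event = NULL;
		int dropped = 0;

		if (zpool_events_next(hdl, &event, &dropped,
		    ZEVENT_NONBLOCK, zevent_fd) != 0)
			break;
		if (event == NULL)
			break;	/* queue is empty */
		/* ... consume the event nvlist here ... */
		nvlist_free(event);
	}
}
#endif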
/*
* Clear all events.
*/
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
zfs_cmd_t zc = {"\0"};
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
return (zpool_standard_error(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot clear events")));
if (count != NULL)
*count = (int)zc.zc_cookie; /* # of events cleared */
return (0);
}
/*
* Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
* the passed zevent_fd file handle. On success zero is returned,
* otherwise -1 is returned and hdl->libzfs_error is set to the errno.
*/
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
zc.zc_guid = eid;
zc.zc_cleanup_fd = zevent_fd;
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
switch (errno) {
case ENOENT:
error = zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
case ENOMEM:
error = zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
}
}
return (error);
}
static void
zpool_obj_to_path_impl(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len, boolean_t always_unmounted)
{
zfs_cmd_t zc = {"\0"};
boolean_t mounted = B_FALSE;
char *mntpnt = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
if (dsobj == 0) {
/* special case for the MOS */
(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
(longlong_t)obj);
return;
}
/* get the dataset's name */
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_obj = dsobj;
if (zfs_ioctl(zhp->zpool_hdl,
ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
/* just write out a path of two object numbers */
(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
(longlong_t)dsobj, (longlong_t)obj);
return;
}
(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
/* find out if the dataset is mounted */
mounted = !always_unmounted && is_mounted(zhp->zpool_hdl, dsname,
&mntpnt);
/* get the corrupted object's path */
(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
zc.zc_obj = obj;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_OBJ_TO_PATH,
&zc) == 0) {
if (mounted) {
(void) snprintf(pathname, len, "%s%s", mntpnt,
zc.zc_value);
} else {
(void) snprintf(pathname, len, "%s:%s",
dsname, zc.zc_value);
}
} else {
(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
(longlong_t)obj);
}
free(mntpnt);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_FALSE);
}
void
zpool_obj_to_path_ds(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zpool_obj_to_path_impl(zhp, dsobj, obj, pathname, len, B_TRUE);
}
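/*
 * Illustrative sketch (not part of the original source): turn the
 * dataset and object numbers reported by zpool_get_errlog() into a
 * human-readable path, as "zpool status -v" does.
 */
#if 0
static void
example_print_error_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj)
{
	char pathname[MAXPATHLEN * 2];

	zpool_obj_to_path(zhp, dsobj, obj, pathname, sizeof (pathname));
	(void) printf("%s\n", pathname);
}
#endif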
/*
* Wait while the specified activity is in progress in the pool.
*/
int
zpool_wait(zpool_handle_t *zhp, zpool_wait_activity_t activity)
{
boolean_t missing;
int error = zpool_wait_status(zhp, activity, &missing, NULL);
if (missing) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
return (ENOENT);
} else {
return (error);
}
}
/*
* Wait for the given activity and return the status of the wait (whether or not
* any waiting was done) in the 'waited' parameter. Non-existent pools are
* reported via the 'missing' parameter, rather than by printing an error
* message. This is convenient when this function is called in a loop over a
* long period of time (as it is, for example, by zpool's wait cmd). In that
* scenario, a pool being exported or destroyed should be considered a normal
* event, so we don't want to print an error when we find that the pool doesn't
* exist.
*/
int
zpool_wait_status(zpool_handle_t *zhp, zpool_wait_activity_t activity,
boolean_t *missing, boolean_t *waited)
{
int error = lzc_wait(zhp->zpool_name, activity, waited);
*missing = (error == ENOENT);
if (*missing)
return (0);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN, "error waiting in pool '%s'"),
zhp->zpool_name);
}
return (error);
}
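/*
 * Illustrative sketch (not part of the original source): wait for one
 * activity while treating a pool that disappears as normal completion,
 * as described above.  ZPOOL_WAIT_SCRUB is just one of the
 * zpool_wait_activity_t values.
 */
#if 0
static int
example_wait_quiet(zpool_handle_t *zhp)
{
	boolean_t missing, waited;
	int error;

	error = zpool_wait_status(zhp, ZPOOL_WAIT_SCRUB, &missing, &waited);
	if (missing)
		return (0);	/* pool exported/destroyed: not an error */
	return (error);
}
#endif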
int
zpool_set_bootenv(zpool_handle_t *zhp, const nvlist_t *envmap)
{
int error = lzc_set_bootenv(zhp->zpool_name, envmap);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error setting bootenv in pool '%s'"), zhp->zpool_name);
}
return (error);
}
int
zpool_get_bootenv(zpool_handle_t *zhp, nvlist_t **nvlp)
{
nvlist_t *nvl;
int error;
nvl = NULL;
error = lzc_get_bootenv(zhp->zpool_name, &nvl);
if (error != 0) {
(void) zpool_standard_error_fmt(zhp->zpool_hdl, error,
dgettext(TEXT_DOMAIN,
"error getting bootenv in pool '%s'"), zhp->zpool_name);
} else {
*nvlp = nvl;
}
return (error);
}
/*
* Attempt to read and parse feature file(s) (from "compatibility" property).
* Files contain zpool feature names, comma or whitespace-separated.
* Comments (# character to next newline) are discarded.
*
* Arguments:
* compatibility : string containing feature filenames
* features : either NULL or pointer to array of boolean
* report : either NULL or pointer to string buffer
* rlen : length of "report" buffer
*
* compatibility is NULL (unset), "", "off", "legacy", or a list of
* comma-separated filenames. Filenames should either be absolute,
* or relative to:
* 1) ZPOOL_SYSCONF_COMPAT_D (eg: /etc/zfs/compatibility.d) or
* 2) ZPOOL_DATA_COMPAT_D (eg: /usr/share/zfs/compatibility.d).
* (Unset), "" or "off" => enable all features
* "legacy" => disable all features
*
* Any feature names read from files which match unames in spa_feature_table
* will have the corresponding boolean set in the features array (if non-NULL).
* If more than one feature file is specified, only features present in
* *all* of them will be set.
*
* "report" if not NULL will be populated with a suitable status message.
*
* Return values:
* ZPOOL_COMPATIBILITY_OK : files read and parsed ok
* ZPOOL_COMPATIBILITY_BADFILE : file too big or not a text file
* ZPOOL_COMPATIBILITY_BADTOKEN : SYSCONF file contains invalid feature name
* ZPOOL_COMPATIBILITY_WARNTOKEN : DATA file contains invalid feature name
* ZPOOL_COMPATIBILITY_NOFILES : no feature files found
*/
zpool_compat_status_t
zpool_load_compat(const char *compat, boolean_t *features, char *report,
size_t rlen)
{
int sdirfd, ddirfd, featfd;
struct stat fs;
char *fc;
char *ps, *ls, *ws;
char *file, *line, *word;
char l_compat[ZFS_MAXPROPLEN];
boolean_t ret_nofiles = B_TRUE;
boolean_t ret_badfile = B_FALSE;
boolean_t ret_badtoken = B_FALSE;
boolean_t ret_warntoken = B_FALSE;
/* special cases (unset), "" and "off" => enable all features */
if (compat == NULL || compat[0] == '\0' ||
strcmp(compat, ZPOOL_COMPAT_OFF) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
if (report != NULL)
strlcpy(report, gettext("all features enabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/* Final special case "legacy" => disable all features */
if (strcmp(compat, ZPOOL_COMPAT_LEGACY) == 0) {
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_FALSE;
if (report != NULL)
strlcpy(report, gettext("all features disabled"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
/*
* Start with all true; will be ANDed with results from each file
*/
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] = B_TRUE;
char err_badfile[1024] = "";
char err_badtoken[1024] = "";
/*
* We ignore errors from the directory open()
* as they're only needed if the filename is relative
* which will be checked during the openat().
*/
/* O_PATH safer than O_RDONLY if system allows it */
#if defined(O_PATH)
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_PATH)
#else
#define ZC_DIR_FLAGS (O_DIRECTORY | O_CLOEXEC | O_RDONLY)
#endif
sdirfd = open(ZPOOL_SYSCONF_COMPAT_D, ZC_DIR_FLAGS);
ddirfd = open(ZPOOL_DATA_COMPAT_D, ZC_DIR_FLAGS);
(void) strlcpy(l_compat, compat, ZFS_MAXPROPLEN);
for (file = strtok_r(l_compat, ",", &ps);
file != NULL;
file = strtok_r(NULL, ",", &ps)) {
boolean_t l_features[SPA_FEATURES];
enum { Z_SYSCONF, Z_DATA } source;
/* try sysconfdir first, then datadir */
source = Z_SYSCONF;
if ((featfd = openat(sdirfd, file, O_RDONLY | O_CLOEXEC)) < 0) {
featfd = openat(ddirfd, file, O_RDONLY | O_CLOEXEC);
source = Z_DATA;
}
/* File readable and correct size? */
if (featfd < 0 ||
fstat(featfd, &fs) < 0 ||
fs.st_size < 1 ||
fs.st_size > ZPOOL_COMPAT_MAXSIZE) {
(void) close(featfd);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
/* Prefault the file if system allows */
#if defined(MAP_POPULATE)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_POPULATE)
#elif defined(MAP_PREFAULT_READ)
#define ZC_MMAP_FLAGS (MAP_PRIVATE | MAP_PREFAULT_READ)
#else
#define ZC_MMAP_FLAGS (MAP_PRIVATE)
#endif
/* private mmap() so we can strtok safely */
fc = (char *)mmap(NULL, fs.st_size, PROT_READ | PROT_WRITE,
ZC_MMAP_FLAGS, featfd, 0);
(void) close(featfd);
/* map ok, and last character == newline? */
if (fc == MAP_FAILED || fc[fs.st_size - 1] != '\n') {
(void) munmap((void *) fc, fs.st_size);
strlcat(err_badfile, file, ZFS_MAXPROPLEN);
strlcat(err_badfile, " ", ZFS_MAXPROPLEN);
ret_badfile = B_TRUE;
continue;
}
ret_nofiles = B_FALSE;
for (uint_t i = 0; i < SPA_FEATURES; i++)
l_features[i] = B_FALSE;
/* replace final newline with NULL to ensure string ends */
fc[fs.st_size - 1] = '\0';
for (line = strtok_r(fc, "\n", &ls);
line != NULL;
line = strtok_r(NULL, "\n", &ls)) {
/* discard comments */
char *r = strchr(line, '#');
if (r != NULL)
*r = '\0';
for (word = strtok_r(line, ", \t", &ws);
word != NULL;
word = strtok_r(NULL, ", \t", &ws)) {
/* Find matching feature name */
uint_t f;
for (f = 0; f < SPA_FEATURES; f++) {
zfeature_info_t *fi =
&spa_feature_table[f];
if (strcmp(word, fi->fi_uname) == 0) {
l_features[f] = B_TRUE;
break;
}
}
if (f < SPA_FEATURES)
continue;
/* found an unrecognized word */
/* lightly sanitize it */
if (strlen(word) > 32)
word[32] = '\0';
for (char *c = word; *c != '\0'; c++)
if (!isprint(*c))
*c = '?';
strlcat(err_badtoken, word, ZFS_MAXPROPLEN);
strlcat(err_badtoken, " ", ZFS_MAXPROPLEN);
if (source == Z_SYSCONF)
ret_badtoken = B_TRUE;
else
ret_warntoken = B_TRUE;
}
}
(void) munmap((void *) fc, fs.st_size);
if (features != NULL)
for (uint_t i = 0; i < SPA_FEATURES; i++)
features[i] &= l_features[i];
}
(void) close(sdirfd);
(void) close(ddirfd);
/* Return the most serious error */
if (ret_badfile) {
if (report != NULL)
snprintf(report, rlen, gettext("could not read/"
"parse feature file(s): %s"), err_badfile);
return (ZPOOL_COMPATIBILITY_BADFILE);
}
if (ret_nofiles) {
if (report != NULL)
strlcpy(report,
gettext("no valid compatibility files specified"),
rlen);
return (ZPOOL_COMPATIBILITY_NOFILES);
}
if (ret_badtoken) {
if (report != NULL)
snprintf(report, rlen, gettext("invalid feature "
"name(s) in local compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_BADTOKEN);
}
if (ret_warntoken) {
if (report != NULL)
snprintf(report, rlen, gettext("unrecognized feature "
"name(s) in distribution compatibility files: %s"),
err_badtoken);
return (ZPOOL_COMPATIBILITY_WARNTOKEN);
}
if (report != NULL)
strlcpy(report, gettext("compatibility set ok"), rlen);
return (ZPOOL_COMPATIBILITY_OK);
}
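/*
 * Illustrative sketch (not part of the original source): evaluate a
 * "compatibility" property value against the installed feature files
 * and report the outcome.  The property string passed in is only an
 * example.
 */
#if 0
static void
example_check_compat(const char *compat_prop)
{
	boolean_t features[SPA_FEATURES];
	char report[1024];
	zpool_compat_status_t status;

	status = zpool_load_compat(compat_prop, features, report,
	    sizeof (report));
	(void) printf("%s\n", report);
	if (status == ZPOOL_COMPATIBILITY_OK) {
		/* features[f] is B_TRUE for each permitted spa_feature_table[f] */
	}
}
#endif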
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
index 36a480d36a05..bee0aff4b49c 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_sendrecv.c
@@ -1,5190 +1,5190 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* All rights reserved
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2018, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2019 Datto Inc.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <fcntl.h>
#include <sys/mount.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/avl.h>
#include <sys/debug.h>
#include <sys/stat.h>
#include <pthread.h>
#include <umem.h>
#include <time.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <libzutil.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_fletcher.h"
#include "libzfs_impl.h"
#include <cityhash.h>
#include <zlib.h>
#include <sys/zio_checksum.h>
#include <sys/dsl_crypt.h>
#include <sys/ddt.h>
#include <sys/socket.h>
#include <sys/sha2.h>
static int zfs_receive_impl(libzfs_handle_t *, const char *, const char *,
recvflags_t *, int, const char *, nvlist_t *, avl_tree_t *, char **,
const char *, nvlist_t *);
static int guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name);
static int guid_to_name(libzfs_handle_t *, const char *,
uint64_t, boolean_t, char *);
typedef struct progress_arg {
zfs_handle_t *pa_zhp;
int pa_fd;
boolean_t pa_parsable;
boolean_t pa_estimate;
int pa_verbosity;
} progress_arg_t;
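/*
 * Write a single replay record (and optional payload) to 'outfd',
 * folding both into the running fletcher-4 stream checksum 'zc'.  For
 * every record other than DRR_BEGIN, the checksum accumulated so far
 * is stored in the record's drr_checksum field before it is written.
 */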
static int
dump_record(dmu_replay_record_t *drr, void *payload, int payload_len,
zio_cksum_t *zc, int outfd)
{
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
fletcher_4_incremental_native(drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum), zc);
if (drr->drr_type != DRR_BEGIN) {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&drr->drr_u.
drr_checksum.drr_checksum));
drr->drr_u.drr_checksum.drr_checksum = *zc;
}
fletcher_4_incremental_native(&drr->drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), zc);
if (write(outfd, drr, sizeof (*drr)) == -1)
return (errno);
if (payload_len != 0) {
fletcher_4_incremental_native(payload, payload_len, zc);
if (write(outfd, payload, payload_len) == -1)
return (errno);
}
return (0);
}
/*
* Routines for dealing with the AVL tree of fs-nvlists
*/
typedef struct fsavl_node {
avl_node_t fn_node;
nvlist_t *fn_nvfs;
char *fn_snapname;
uint64_t fn_guid;
} fsavl_node_t;
static int
fsavl_compare(const void *arg1, const void *arg2)
{
const fsavl_node_t *fn1 = (const fsavl_node_t *)arg1;
const fsavl_node_t *fn2 = (const fsavl_node_t *)arg2;
return (TREE_CMP(fn1->fn_guid, fn2->fn_guid));
}
/*
* Given the GUID of a snapshot, find its containing filesystem and
* (optionally) name.
*/
static nvlist_t *
fsavl_find(avl_tree_t *avl, uint64_t snapguid, char **snapname)
{
fsavl_node_t fn_find;
fsavl_node_t *fn;
fn_find.fn_guid = snapguid;
fn = avl_find(avl, &fn_find, NULL);
if (fn) {
if (snapname)
*snapname = fn->fn_snapname;
return (fn->fn_nvfs);
}
return (NULL);
}
static void
fsavl_destroy(avl_tree_t *avl)
{
fsavl_node_t *fn;
void *cookie;
if (avl == NULL)
return;
cookie = NULL;
while ((fn = avl_destroy_nodes(avl, &cookie)) != NULL)
free(fn);
avl_destroy(avl);
free(avl);
}
/*
* Given an nvlist, produce an avl tree of snapshots, ordered by guid
*/
static avl_tree_t *
fsavl_create(nvlist_t *fss)
{
avl_tree_t *fsavl;
nvpair_t *fselem = NULL;
if ((fsavl = malloc(sizeof (avl_tree_t))) == NULL)
return (NULL);
avl_create(fsavl, fsavl_compare, sizeof (fsavl_node_t),
offsetof(fsavl_node_t, fn_node));
while ((fselem = nvlist_next_nvpair(fss, fselem)) != NULL) {
nvlist_t *nvfs, *snaps;
nvpair_t *snapelem = NULL;
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
while ((snapelem =
nvlist_next_nvpair(snaps, snapelem)) != NULL) {
fsavl_node_t *fn;
uint64_t guid;
guid = fnvpair_value_uint64(snapelem);
if ((fn = malloc(sizeof (fsavl_node_t))) == NULL) {
fsavl_destroy(fsavl);
return (NULL);
}
fn->fn_nvfs = nvfs;
fn->fn_snapname = nvpair_name(snapelem);
fn->fn_guid = guid;
/*
* Note: if there are multiple snaps with the
* same GUID, we ignore all but one.
*/
if (avl_find(fsavl, fn, NULL) == NULL)
avl_add(fsavl, fn);
else
free(fn);
}
}
return (fsavl);
}
/*
* Routines for dealing with the giant nvlist of fs-nvlists, etc.
*/
typedef struct send_data {
/*
* assigned inside every recursive call,
* restored from *_save on return:
*
* guid of fromsnap snapshot in parent dataset
* txg of fromsnap snapshot in current dataset
* txg of tosnap snapshot in current dataset
*/
uint64_t parent_fromsnap_guid;
uint64_t fromsnap_txg;
uint64_t tosnap_txg;
/* the nvlists get accumulated during depth-first traversal */
nvlist_t *parent_snaps;
nvlist_t *fss;
nvlist_t *snapprops;
nvlist_t *snapholds; /* user holds */
/* send-receive configuration, does not change during traversal */
const char *fsname;
const char *fromsnap;
const char *tosnap;
boolean_t recursive;
boolean_t raw;
boolean_t doall;
boolean_t replicate;
boolean_t skipmissing;
boolean_t verbose;
boolean_t backup;
boolean_t seenfrom;
boolean_t seento;
boolean_t holds; /* were holds requested with send -h */
boolean_t props;
/*
* The header nvlist is of the following format:
* {
* "tosnap" -> string
* "fromsnap" -> string (if incremental)
* "fss" -> {
* id -> {
*
* "name" -> string (full name; for debugging)
* "parentfromsnap" -> number (guid of fromsnap in parent)
*
* "props" -> { name -> value (only if set here) }
* "snaps" -> { name (lastname) -> number (guid) }
* "snapprops" -> { name (lastname) -> { name -> value } }
* "snapholds" -> { name (lastname) -> { holdname -> crtime } }
*
* "origin" -> number (guid) (if clone)
* "is_encroot" -> boolean
* "sent" -> boolean (not on-disk)
* }
* }
* }
*
*/
} send_data_t;
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv);
static int
send_iterate_snap(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
char *snapname;
nvlist_t *nv;
boolean_t isfromsnap, istosnap, istosnapwithnofrom;
snapname = strrchr(zhp->zfs_name, '@')+1;
isfromsnap = (sd->fromsnap != NULL &&
strcmp(sd->fromsnap, snapname) == 0);
istosnap = (sd->tosnap != NULL && (strcmp(sd->tosnap, snapname) == 0));
istosnapwithnofrom = (istosnap && sd->fromsnap == NULL);
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping snapshot %s because it was created "
"after the destination snapshot (%s)\n"),
zhp->zfs_name, sd->tosnap);
}
zfs_close(zhp);
return (0);
}
fnvlist_add_uint64(sd->parent_snaps, snapname, guid);
/*
* NB: if there is no fromsnap here (it's a newly created fs in
* an incremental replication), we will substitute the tosnap.
*/
if (isfromsnap || (sd->parent_fromsnap_guid == 0 && istosnap)) {
sd->parent_fromsnap_guid = guid;
}
if (!sd->recursive) {
/*
* To allow a doall stream to work properly
* with a NULL fromsnap
*/
if (sd->doall && sd->fromsnap == NULL && !sd->seenfrom) {
sd->seenfrom = B_TRUE;
}
if (!sd->seenfrom && isfromsnap) {
sd->seenfrom = B_TRUE;
zfs_close(zhp);
return (0);
}
if ((sd->seento || !sd->seenfrom) && !istosnapwithnofrom) {
zfs_close(zhp);
return (0);
}
if (istosnap)
sd->seento = B_TRUE;
}
nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
fnvlist_add_nvlist(sd->snapprops, snapname, nv);
fnvlist_free(nv);
if (sd->holds) {
nvlist_t *holds = fnvlist_alloc();
int err = lzc_get_holds(zhp->zfs_name, &holds);
if (err == 0) {
fnvlist_add_nvlist(sd->snapholds, snapname, holds);
}
fnvlist_free(holds);
}
zfs_close(zhp);
return (0);
}
static void
send_iterate_prop(zfs_handle_t *zhp, boolean_t received_only, nvlist_t *nv)
{
nvlist_t *props = NULL;
nvpair_t *elem = NULL;
if (received_only)
props = zfs_get_recvd_props(zhp);
else
props = zhp->zfs_props;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
char *propname = nvpair_name(elem);
zfs_prop_t prop = zfs_name_to_prop(propname);
nvlist_t *propnv;
if (!zfs_prop_user(propname)) {
/*
* Realistically, this should never happen. However,
* we want the ability to add DSL properties without
* needing to make incompatible version changes. We
* need to ignore unknown properties to allow older
* software to still send datasets containing these
* properties, with the unknown properties elided.
*/
if (prop == ZPROP_INVAL)
continue;
if (zfs_prop_readonly(prop))
continue;
}
verify(nvpair_value_nvlist(elem, &propnv) == 0);
if (prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION) {
char *source;
uint64_t value;
verify(nvlist_lookup_uint64(propnv,
ZPROP_VALUE, &value) == 0);
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT)
continue;
/*
* May have no source before SPA_VERSION_RECVD_PROPS,
* but is still modifiable.
*/
if (nvlist_lookup_string(propnv,
ZPROP_SOURCE, &source) == 0) {
if ((strcmp(source, zhp->zfs_name) != 0) &&
(strcmp(source,
ZPROP_SOURCE_VAL_RECVD) != 0))
continue;
}
} else {
char *source;
if (nvlist_lookup_string(propnv,
ZPROP_SOURCE, &source) != 0)
continue;
if ((strcmp(source, zhp->zfs_name) != 0) &&
(strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0))
continue;
}
if (zfs_prop_user(propname) ||
zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
char *value;
value = fnvlist_lookup_string(propnv, ZPROP_VALUE);
fnvlist_add_string(nv, propname, value);
} else {
uint64_t value;
value = fnvlist_lookup_uint64(propnv, ZPROP_VALUE);
fnvlist_add_uint64(nv, propname, value);
}
}
}
/*
* Returns the snapshot's creation txg,
* or 0 if the snapshot does not exist.
*/
static uint64_t
get_snap_txg(libzfs_handle_t *hdl, const char *fs, const char *snap)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t txg = 0;
if (fs == NULL || fs[0] == '\0' || snap == NULL || snap[0] == '\0')
return (txg);
(void) snprintf(name, sizeof (name), "%s@%s", fs, snap);
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_SNAPSHOT)) {
zfs_handle_t *zhp = zfs_open(hdl, name, ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
txg = zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG);
zfs_close(zhp);
}
}
return (txg);
}
/*
* recursively generate nvlists describing datasets. See comment
* for the data structure send_data_t above for description of contents
* of the nvlist.
*/
static int
send_iterate_fs(zfs_handle_t *zhp, void *arg)
{
send_data_t *sd = arg;
nvlist_t *nvfs = NULL, *nv = NULL;
int rv = 0;
uint64_t min_txg = 0, max_txg = 0;
uint64_t parent_fromsnap_guid_save = sd->parent_fromsnap_guid;
uint64_t fromsnap_txg_save = sd->fromsnap_txg;
uint64_t tosnap_txg_save = sd->tosnap_txg;
uint64_t txg = zhp->zfs_dmustats.dds_creation_txg;
uint64_t guid = zhp->zfs_dmustats.dds_guid;
uint64_t fromsnap_txg, tosnap_txg;
char guidstring[64];
fromsnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->fromsnap);
if (fromsnap_txg != 0)
sd->fromsnap_txg = fromsnap_txg;
tosnap_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name, sd->tosnap);
if (tosnap_txg != 0)
sd->tosnap_txg = tosnap_txg;
/*
* on the send side, if the current dataset does not have tosnap,
* perform two additional checks:
*
* - skip sending the current dataset if it was created later than
* the parent tosnap
* - return error if the current dataset was created earlier than
* the parent tosnap, unless --skip-missing is specified, in which
* case just print a warning
*/
if (sd->tosnap != NULL && tosnap_txg == 0) {
if (sd->tosnap_txg != 0 && txg > sd->tosnap_txg) {
if (sd->verbose) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"skipping dataset %s: snapshot %s does "
"not exist\n"), zhp->zfs_name, sd->tosnap);
}
} else if (sd->skipmissing) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: skipping dataset %s and its children:"
" snapshot %s does not exist\n"),
zhp->zfs_name, sd->tosnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s%s: snapshot %s@%s does not "
"exist\n"), sd->fsname, sd->tosnap, sd->recursive ?
dgettext(TEXT_DOMAIN, " recursively") : "",
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOENT;
}
goto out;
}
nvfs = fnvlist_alloc();
fnvlist_add_string(nvfs, "name", zhp->zfs_name);
fnvlist_add_uint64(nvfs, "parentfromsnap",
sd->parent_fromsnap_guid);
if (zhp->zfs_dmustats.dds_origin[0]) {
zfs_handle_t *origin = zfs_open(zhp->zfs_hdl,
zhp->zfs_dmustats.dds_origin, ZFS_TYPE_SNAPSHOT);
if (origin == NULL) {
rv = -1;
goto out;
}
fnvlist_add_uint64(nvfs, "origin",
origin->zfs_dmustats.dds_guid);
zfs_close(origin);
}
/* iterate over props */
if (sd->props || sd->backup || sd->recursive) {
nv = fnvlist_alloc();
send_iterate_prop(zhp, sd->backup, nv);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF) {
boolean_t encroot;
/* determine if this dataset is an encryption root */
if (zfs_crypto_get_encryption_root(zhp, &encroot, NULL) != 0) {
rv = -1;
goto out;
}
if (encroot)
fnvlist_add_boolean(nvfs, "is_encroot");
/*
* Encrypted datasets can only be sent with properties if
* the raw flag is specified because the receive side doesn't
* currently have a mechanism for recursively asking the user
* for new encryption parameters.
*/
if (!sd->raw) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"cannot send %s@%s: encrypted dataset %s may not "
"be sent with properties without the raw flag\n"),
sd->fsname, sd->tosnap, zhp->zfs_name);
rv = -1;
goto out;
}
}
if (nv != NULL)
fnvlist_add_nvlist(nvfs, "props", nv);
/* iterate over snaps, and set sd->parent_fromsnap_guid */
sd->parent_fromsnap_guid = 0;
sd->parent_snaps = fnvlist_alloc();
sd->snapprops = fnvlist_alloc();
if (sd->holds)
sd->snapholds = fnvlist_alloc();
/*
* If this is a "doall" send, a replicate send or we're just trying
* to gather a list of previous snapshots, iterate through all the
* snaps in the txg range. Otherwise just look at the one we're
* interested in.
*/
if (sd->doall || sd->replicate || sd->tosnap == NULL) {
if (!sd->replicate && fromsnap_txg != 0)
min_txg = fromsnap_txg;
if (!sd->replicate && tosnap_txg != 0)
max_txg = tosnap_txg;
(void) zfs_iter_snapshots_sorted(zhp, send_iterate_snap, sd,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
(void) snprintf(snapname, sizeof (snapname), "%s@%s",
zhp->zfs_name, sd->tosnap);
if (sd->fromsnap != NULL)
sd->seenfrom = B_TRUE;
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
(void) send_iterate_snap(snap, sd);
}
fnvlist_add_nvlist(nvfs, "snaps", sd->parent_snaps);
fnvlist_add_nvlist(nvfs, "snapprops", sd->snapprops);
if (sd->holds)
fnvlist_add_nvlist(nvfs, "snapholds", sd->snapholds);
fnvlist_free(sd->parent_snaps);
fnvlist_free(sd->snapprops);
fnvlist_free(sd->snapholds);
/* Do not allow the size of the properties list to exceed the limit */
if ((fnvlist_size(nvfs) + fnvlist_size(sd->fss)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"warning: cannot send %s@%s: the size of the list of "
"snapshots and properties is too large to be received "
"successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name, sd->tosnap);
rv = EZFS_NOSPC;
goto out;
}
/* add this fs to nvlist */
(void) snprintf(guidstring, sizeof (guidstring),
"0x%llx", (longlong_t)guid);
fnvlist_add_nvlist(sd->fss, guidstring, nvfs);
/* iterate over children */
if (sd->recursive)
rv = zfs_iter_filesystems(zhp, send_iterate_fs, sd);
out:
sd->parent_fromsnap_guid = parent_fromsnap_guid_save;
sd->fromsnap_txg = fromsnap_txg_save;
sd->tosnap_txg = tosnap_txg_save;
fnvlist_free(nv);
fnvlist_free(nvfs);
zfs_close(zhp);
return (rv);
}
static int
gather_nvlist(libzfs_handle_t *hdl, const char *fsname, const char *fromsnap,
const char *tosnap, boolean_t recursive, boolean_t raw, boolean_t doall,
boolean_t replicate, boolean_t skipmissing, boolean_t verbose,
boolean_t backup, boolean_t holds, boolean_t props, nvlist_t **nvlp,
avl_tree_t **avlp)
{
zfs_handle_t *zhp;
send_data_t sd = { 0 };
int error;
zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (EZFS_BADTYPE);
sd.fss = fnvlist_alloc();
sd.fsname = fsname;
sd.fromsnap = fromsnap;
sd.tosnap = tosnap;
sd.recursive = recursive;
sd.raw = raw;
sd.doall = doall;
sd.replicate = replicate;
sd.skipmissing = skipmissing;
sd.verbose = verbose;
sd.backup = backup;
sd.holds = holds;
sd.props = props;
if ((error = send_iterate_fs(zhp, &sd)) != 0) {
fnvlist_free(sd.fss);
if (avlp != NULL)
*avlp = NULL;
*nvlp = NULL;
return (error);
}
if (avlp != NULL && (*avlp = fsavl_create(sd.fss)) == NULL) {
fnvlist_free(sd.fss);
*nvlp = NULL;
return (EZFS_NOMEM);
}
*nvlp = sd.fss;
return (0);
}
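/*
 * Illustrative sketch (not part of the original source): build the
 * fs-nvlist and its guid-indexed AVL tree for a dataset with
 * gather_nvlist(), then look a snapshot up by guid.  The guid value is
 * assumed to come from the caller.
 */
#if 0
static void
example_gather(libzfs_handle_t *hdl, const char *fsname, uint64_t snapguid)
{
	nvlist_t *fss = NULL;
	avl_tree_t *fsavl = NULL;
	char *snapname;

	/* recursive, non-raw, replicate-style gather including props */
	if (gather_nvlist(hdl, fsname, NULL, NULL, B_TRUE, B_FALSE, B_FALSE,
	    B_TRUE, B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_TRUE,
	    &fss, &fsavl) != 0)
		return;

	if (fsavl_find(fsavl, snapguid, &snapname) != NULL)
		(void) printf("snapshot name: %s\n", snapname);

	fsavl_destroy(fsavl);
	fnvlist_free(fss);
}
#endif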
/*
* Routines specific to "zfs send"
*/
typedef struct send_dump_data {
/* these are all just the short snapname (the part after the @) */
const char *fromsnap;
const char *tosnap;
char prevsnap[ZFS_MAX_DATASET_NAME_LEN];
uint64_t prevsnap_obj;
boolean_t seenfrom, seento, replicate, doall, fromorigin;
boolean_t dryrun, parsable, progress, embed_data, std_out;
boolean_t large_block, compress, raw, holds;
int outfd;
boolean_t err;
nvlist_t *fss;
nvlist_t *snapholds;
avl_tree_t *fsavl;
snapfilter_cb_t *filter_cb;
void *filter_cb_arg;
nvlist_t *debugnv;
char holdtag[ZFS_MAX_DATASET_NAME_LEN];
int cleanup_fd;
int verbosity;
uint64_t size;
} send_dump_data_t;
static int
zfs_send_space(zfs_handle_t *zhp, const char *snapname, const char *from,
enum lzc_send_flags flags, uint64_t *spacep)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
int error;
assert(snapname != NULL);
error = lzc_send_space(snapname, from, flags, spacep);
if (error != 0) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot estimate space for '%s'"), snapname);
switch (error) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, snapname,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
snapname);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(error));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, error, errbuf));
}
}
return (0);
}
/*
* Dumps a backup of the given snapshot (incremental from fromsnap if it's not
* NULL) to the file descriptor specified by outfd.
*/
static int
dump_ioctl(zfs_handle_t *zhp, const char *fromsnap, uint64_t fromsnap_obj,
boolean_t fromorigin, int outfd, enum lzc_send_flags flags,
nvlist_t *debugnv)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *thisdbg;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
assert(fromsnap_obj == 0 || !fromorigin);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = outfd;
zc.zc_obj = fromorigin;
zc.zc_sendobj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zc.zc_fromobj = fromsnap_obj;
zc.zc_flags = flags;
thisdbg = fnvlist_alloc();
if (fromsnap && fromsnap[0] != '\0') {
fnvlist_add_string(thisdbg, "fromsnap", fromsnap);
}
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND, &zc) != 0) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
fnvlist_add_uint64(thisdbg, "error", errno);
if (debugnv) {
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
}
fnvlist_free(thisdbg);
switch (errno) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ENOENT:
if (zfs_dataset_exists(hdl, zc.zc_name,
ZFS_TYPE_SNAPSHOT)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (@%s) does not exist"),
zc.zc_value);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
case EINVAL:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
if (debugnv)
fnvlist_add_nvlist(debugnv, zhp->zfs_name, thisdbg);
fnvlist_free(thisdbg);
return (0);
}
static void
gather_holds(zfs_handle_t *zhp, send_dump_data_t *sdd)
{
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
/*
* zfs_send() only sets snapholds for sends that need them,
* e.g. replication and doall.
*/
if (sdd->snapholds == NULL)
return;
fnvlist_add_string(sdd->snapholds, zhp->zfs_name, sdd->holdtag);
}
int
zfs_send_progress(zfs_handle_t *zhp, int fd, uint64_t *bytes_written,
uint64_t *blocks_visited)
{
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_cookie = fd;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_SEND_PROGRESS, &zc) != 0)
return (errno);
if (bytes_written != NULL)
*bytes_written = zc.zc_cookie;
if (blocks_visited != NULL)
*blocks_visited = zc.zc_objset_type;
return (0);
}
static void *
send_progress_thread(void *arg)
{
progress_arg_t *pa = arg;
zfs_handle_t *zhp = pa->pa_zhp;
uint64_t bytes;
uint64_t blocks;
char buf[16];
time_t t;
struct tm *tm;
boolean_t firstloop = B_TRUE;
/*
* Print the progress from ZFS_IOC_SEND_PROGRESS every second.
*/
for (;;) {
int err;
(void) sleep(1);
if ((err = zfs_send_progress(zhp, pa->pa_fd, &bytes,
&blocks)) != 0) {
if (err == EINTR || err == ENOENT)
return ((void *)0);
return ((void *)(uintptr_t)err);
}
if (firstloop && !pa->pa_parsable) {
(void) fprintf(stderr,
"TIME %s %sSNAPSHOT %s\n",
pa->pa_estimate ? "BYTES" : " SENT",
pa->pa_verbosity >= 2 ? " BLOCKS " : "",
zhp->zfs_name);
firstloop = B_FALSE;
}
(void) time(&t);
tm = localtime(&t);
if (pa->pa_verbosity >= 2 && pa->pa_parsable) {
(void) fprintf(stderr,
"%02d:%02d:%02d\t%llu\t%llu\t%s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
(u_longlong_t)bytes, (u_longlong_t)blocks,
zhp->zfs_name);
} else if (pa->pa_verbosity >= 2) {
zfs_nicenum(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"%02d:%02d:%02d %5s %8llu %s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
buf, (u_longlong_t)blocks, zhp->zfs_name);
} else if (pa->pa_parsable) {
(void) fprintf(stderr, "%02d:%02d:%02d\t%llu\t%s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
(u_longlong_t)bytes, zhp->zfs_name);
} else {
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr, "%02d:%02d:%02d %5s %s\n",
tm->tm_hour, tm->tm_min, tm->tm_sec,
buf, zhp->zfs_name);
}
}
}
static void
send_print_verbose(FILE *fout, const char *tosnap, const char *fromsnap,
uint64_t size, boolean_t parsable)
{
if (parsable) {
if (fromsnap != NULL) {
(void) fprintf(fout, "incremental\t%s\t%s",
fromsnap, tosnap);
} else {
(void) fprintf(fout, "full\t%s",
tosnap);
}
} else {
if (fromsnap != NULL) {
if (strchr(fromsnap, '@') == NULL &&
strchr(fromsnap, '#') == NULL) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from @%s to %s"),
fromsnap, tosnap);
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"send from %s to %s"),
fromsnap, tosnap);
}
} else {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"full send of %s"),
tosnap);
}
}
if (parsable) {
(void) fprintf(fout, "\t%llu",
(longlong_t)size);
} else if (size != 0) {
char buf[16];
zfs_nicebytes(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
" estimated size is %s"), buf);
}
(void) fprintf(fout, "\n");
}
static int
dump_snapshot(zfs_handle_t *zhp, void *arg)
{
send_dump_data_t *sdd = arg;
progress_arg_t pa = { 0 };
pthread_t tid;
char *thissnap;
enum lzc_send_flags flags = 0;
int err;
boolean_t isfromsnap, istosnap, fromorigin;
boolean_t exclude = B_FALSE;
FILE *fout = sdd->std_out ? stdout : stderr;
err = 0;
thissnap = strchr(zhp->zfs_name, '@') + 1;
isfromsnap = (sdd->fromsnap != NULL &&
strcmp(sdd->fromsnap, thissnap) == 0);
if (!sdd->seenfrom && isfromsnap) {
gather_holds(zhp, sdd);
sdd->seenfrom = B_TRUE;
(void) strlcpy(sdd->prevsnap, thissnap,
sizeof (sdd->prevsnap));
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (0);
}
if (sdd->seento || !sdd->seenfrom) {
zfs_close(zhp);
return (0);
}
istosnap = (strcmp(sdd->tosnap, thissnap) == 0);
if (istosnap)
sdd->seento = B_TRUE;
if (sdd->large_block)
flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (sdd->embed_data)
flags |= LZC_SEND_FLAG_EMBED_DATA;
if (sdd->compress)
flags |= LZC_SEND_FLAG_COMPRESS;
if (sdd->raw)
flags |= LZC_SEND_FLAG_RAW;
if (!sdd->doall && !isfromsnap && !istosnap) {
if (sdd->replicate) {
char *snapname;
nvlist_t *snapprops;
/*
* Filter out all intermediate snapshots except origin
* snapshots needed to replicate clones.
*/
nvlist_t *nvfs = fsavl_find(sdd->fsavl,
zhp->zfs_dmustats.dds_guid, &snapname);
snapprops = fnvlist_lookup_nvlist(nvfs, "snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops, thissnap);
exclude = !nvlist_exists(snapprops, "is_clone_origin");
} else {
exclude = B_TRUE;
}
}
/*
* If a filter function exists, call it to determine whether
* this snapshot will be sent.
*/
if (exclude || (sdd->filter_cb != NULL &&
sdd->filter_cb(zhp, sdd->filter_cb_arg) == B_FALSE)) {
/*
* This snapshot is filtered out. Don't send it, and don't
* set prevsnap_obj, so it will be as if this snapshot didn't
* exist, and the next accepted snapshot will be sent as
* an incremental from the last accepted one, or as the
* first (and full) snapshot in the case of a replication,
* non-incremental send.
*/
zfs_close(zhp);
return (0);
}
gather_holds(zhp, sdd);
fromorigin = sdd->prevsnap[0] == '\0' &&
(sdd->fromorigin || sdd->replicate);
if (sdd->verbosity != 0) {
uint64_t size = 0;
char fromds[ZFS_MAX_DATASET_NAME_LEN];
if (sdd->prevsnap[0] != '\0') {
(void) strlcpy(fromds, zhp->zfs_name, sizeof (fromds));
*(strchr(fromds, '@') + 1) = '\0';
(void) strlcat(fromds, sdd->prevsnap, sizeof (fromds));
}
if (zfs_send_space(zhp, zhp->zfs_name,
sdd->prevsnap[0] ? fromds : NULL, flags, &size) != 0) {
size = 0; /* cannot estimate send space */
} else {
send_print_verbose(fout, zhp->zfs_name,
sdd->prevsnap[0] ? sdd->prevsnap : NULL,
size, sdd->parsable);
}
sdd->size += size;
}
if (!sdd->dryrun) {
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (sdd->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = sdd->outfd;
pa.pa_parsable = sdd->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = sdd->verbosity;
if ((err = pthread_create(&tid, NULL,
send_progress_thread, &pa)) != 0) {
zfs_close(zhp);
return (err);
}
}
err = dump_ioctl(zhp, sdd->prevsnap, sdd->prevsnap_obj,
fromorigin, sdd->outfd, flags, sdd->debugnv);
if (sdd->progress) {
void *status = NULL;
(void) pthread_cancel(tid);
(void) pthread_join(tid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"progress thread exited nonzero"));
return (zfs_standard_error(zhp->zfs_hdl, error,
errbuf));
}
}
}
(void) strcpy(sdd->prevsnap, thissnap);
sdd->prevsnap_obj = zfs_prop_get_int(zhp, ZFS_PROP_OBJSETID);
zfs_close(zhp);
return (err);
}
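/*
* Send all requested snapshots of a single filesystem or volume. Verifies
* that the "to" (and, for replication, the "from") snapshot exists, then
* either iterates over the snapshots in txg order or opens just the two
* endpoints, warning and setting sdd->err if an endpoint is missing.
*/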
static int
dump_filesystem(zfs_handle_t *zhp, void *arg)
{
int rv = 0;
send_dump_data_t *sdd = arg;
boolean_t missingfrom = B_FALSE;
zfs_cmd_t zc = {"\0"};
uint64_t min_txg = 0, max_txg = 0;
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->tosnap);
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
sdd->err = B_TRUE;
return (0);
}
if (sdd->replicate && sdd->fromsnap) {
/*
* If this fs does not have fromsnap, and we're doing
* recursive, we need to send a full stream from the
* beginning (or an incremental from the origin if this
* is a clone). If we're doing non-recursive, then let
* them get the error.
*/
(void) snprintf(zc.zc_name, sizeof (zc.zc_name), "%s@%s",
zhp->zfs_name, sdd->fromsnap);
if (zfs_ioctl(zhp->zfs_hdl,
ZFS_IOC_OBJSET_STATS, &zc) != 0) {
missingfrom = B_TRUE;
}
}
sdd->seenfrom = sdd->seento = sdd->prevsnap[0] = 0;
sdd->prevsnap_obj = 0;
if (sdd->fromsnap == NULL || missingfrom)
sdd->seenfrom = B_TRUE;
/*
* Iterate through all snapshots and process the ones we will be
* sending. If we only have a "from" and "to" snapshot to deal
* with, we can avoid iterating through all the other snapshots.
*/
if (sdd->doall || sdd->replicate || sdd->tosnap == NULL) {
if (!sdd->replicate && sdd->fromsnap != NULL)
min_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name,
sdd->fromsnap);
if (!sdd->replicate && sdd->tosnap != NULL)
max_txg = get_snap_txg(zhp->zfs_hdl, zhp->zfs_name,
sdd->tosnap);
rv = zfs_iter_snapshots_sorted(zhp, dump_snapshot, arg,
min_txg, max_txg);
} else {
char snapname[MAXPATHLEN] = { 0 };
zfs_handle_t *snap;
if (!sdd->seenfrom) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->fromsnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = -1;
}
if (rv == 0) {
(void) snprintf(snapname, sizeof (snapname),
"%s@%s", zhp->zfs_name, sdd->tosnap);
snap = zfs_open(zhp->zfs_hdl, snapname,
ZFS_TYPE_SNAPSHOT);
if (snap != NULL)
rv = dump_snapshot(snap, sdd);
else
rv = -1;
}
}
if (!sdd->seenfrom) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) does not exist\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
sdd->err = B_TRUE;
} else if (!sdd->seento) {
if (sdd->fromsnap) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: could not send %s@%s:\n"
"incremental source (%s@%s) "
"is not earlier than it\n"),
zhp->zfs_name, sdd->tosnap,
zhp->zfs_name, sdd->fromsnap);
} else {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"WARNING: "
"could not send %s@%s: does not exist\n"),
zhp->zfs_name, sdd->tosnap);
}
sdd->err = B_TRUE;
}
return (rv);
}
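/*
* Send every filesystem in a replication stream. Clone origin snapshots are
* marked first; the list is then walked repeatedly, sending a dataset only
* after its parent and its clone origin have been sent, until nothing
* remains to send.
*/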
static int
dump_filesystems(zfs_handle_t *rzhp, void *arg)
{
send_dump_data_t *sdd = arg;
nvpair_t *fspair;
boolean_t needagain, progress;
if (!sdd->replicate)
return (dump_filesystem(rzhp, sdd));
/* Mark the clone origin snapshots. */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *nvfs;
uint64_t origin_guid = 0;
nvfs = fnvpair_value_nvlist(fspair);
(void) nvlist_lookup_uint64(nvfs, "origin", &origin_guid);
if (origin_guid != 0) {
char *snapname;
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, &snapname);
if (origin_nv != NULL) {
nvlist_t *snapprops;
snapprops = fnvlist_lookup_nvlist(origin_nv,
"snapprops");
snapprops = fnvlist_lookup_nvlist(snapprops,
snapname);
fnvlist_add_boolean(snapprops,
"is_clone_origin");
}
}
}
again:
needagain = progress = B_FALSE;
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist, *parent_nv;
char *fsname;
zfs_handle_t *zhp;
int err;
uint64_t origin_guid = 0;
uint64_t parent_guid = 0;
fslist = fnvpair_value_nvlist(fspair);
if (nvlist_lookup_boolean(fslist, "sent") == 0)
continue;
fsname = fnvlist_lookup_string(fslist, "name");
(void) nvlist_lookup_uint64(fslist, "origin", &origin_guid);
(void) nvlist_lookup_uint64(fslist, "parentfromsnap",
&parent_guid);
if (parent_guid != 0) {
parent_nv = fsavl_find(sdd->fsavl, parent_guid, NULL);
if (!nvlist_exists(parent_nv, "sent")) {
/* parent has not been sent; skip this one */
needagain = B_TRUE;
continue;
}
}
if (origin_guid != 0) {
nvlist_t *origin_nv = fsavl_find(sdd->fsavl,
origin_guid, NULL);
if (origin_nv != NULL &&
!nvlist_exists(origin_nv, "sent")) {
/*
* origin has not been sent yet;
* skip this clone.
*/
needagain = B_TRUE;
continue;
}
}
zhp = zfs_open(rzhp->zfs_hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
err = dump_filesystem(zhp, sdd);
fnvlist_add_boolean(fslist, "sent");
progress = B_TRUE;
zfs_close(zhp);
if (err)
return (err);
}
if (needagain) {
assert(progress);
goto again;
}
/* clean out the sent flags in case we reuse this fss */
for (fspair = nvlist_next_nvpair(sdd->fss, NULL); fspair;
fspair = nvlist_next_nvpair(sdd->fss, fspair)) {
nvlist_t *fslist;
fslist = fnvpair_value_nvlist(fspair);
(void) nvlist_remove_all(fslist, "sent");
}
return (0);
}
nvlist_t *
zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl, const char *token)
{
unsigned int version;
int nread, i;
unsigned long long checksum, packed_len;
/*
* Decode token header, which is:
* <token version>-<checksum of payload>-<uncompressed payload length>
* Note that the only supported token version is 1.
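* For example (illustrative values only):
*   1-<checksum>-<length>-<hex payload>
* where the payload after the last '-' is the hex encoding of the
* zlib-compressed packed nvlist decoded below.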
*/
nread = sscanf(token, "%u-%llx-%llx-",
&version, &checksum, &packed_len);
if (nread != 3) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid format)"));
return (NULL);
}
if (version != ZFS_SEND_RESUME_TOKEN_VERSION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (invalid version %u)"),
version);
return (NULL);
}
/* convert hexadecimal representation to binary */
token = strrchr(token, '-') + 1;
int len = strlen(token) / 2;
unsigned char *compressed = zfs_alloc(hdl, len);
for (i = 0; i < len; i++) {
nread = sscanf(token + i * 2, "%2hhx", compressed + i);
if (nread != 1) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt "
"(payload is not hex-encoded)"));
return (NULL);
}
}
/* verify checksum */
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, len, &cksum);
if (cksum.zc_word[0] != checksum) {
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (incorrect checksum)"));
return (NULL);
}
/* uncompress */
void *packed = zfs_alloc(hdl, packed_len);
uLongf packed_len_long = packed_len;
if (uncompress(packed, &packed_len_long, compressed, len) != Z_OK ||
packed_len_long != packed_len) {
free(packed);
free(compressed);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (decompression failed)"));
return (NULL);
}
/* unpack nvlist */
nvlist_t *nv;
int error = nvlist_unpack(packed, packed_len, &nv, KM_SLEEP);
free(packed);
free(compressed);
if (error != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt (nvlist_unpack failed)"));
return (NULL);
}
return (nv);
}
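/*
* Map the user-visible sendflags_t booleans onto the corresponding
* lzc_send_flags bits.
*/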
static enum lzc_send_flags
lzc_flags_from_sendflags(const sendflags_t *flags)
{
enum lzc_send_flags lzc_flags = 0;
if (flags->largeblock)
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (flags->embed_data)
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (flags->compress)
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (flags->raw)
lzc_flags |= LZC_SEND_FLAG_RAW;
if (flags->saved)
lzc_flags |= LZC_SEND_FLAG_SAVED;
return (lzc_flags);
}
static int
estimate_size(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
uint64_t resumeobj, uint64_t resumeoff, uint64_t bytes,
const char *redactbook, char *errbuf)
{
uint64_t size;
FILE *fout = flags->dryrun ? stdout : stderr;
progress_arg_t pa = { 0 };
int err = 0;
pthread_t ptid;
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_TRUE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
}
err = lzc_send_space_resume_redacted(zhp->zfs_name, from,
lzc_flags_from_sendflags(flags), resumeobj, resumeoff, bytes,
redactbook, fd, &size);
if (flags->progress) {
void *status = NULL;
(void) pthread_cancel(ptid);
(void) pthread_join(ptid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "progress thread exited "
"nonzero"));
return (zfs_standard_error(zhp->zfs_hdl, error,
errbuf));
}
}
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
send_print_verbose(fout, zhp->zfs_name, from, size,
flags->parsable);
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n", (longlong_t)size);
} else {
char buf[16];
zfs_nicenum(size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
return (0);
}
static boolean_t
redact_snaps_contains(const uint64_t *snaps, uint64_t num_snaps, uint64_t guid)
{
for (int i = 0; i < num_snaps; i++) {
if (snaps[i] == guid)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
redact_snaps_equal(const uint64_t *snaps1, uint64_t num_snaps1,
const uint64_t *snaps2, uint64_t num_snaps2)
{
if (num_snaps1 != num_snaps2)
return (B_FALSE);
for (int i = 0; i < num_snaps1; i++) {
if (!redact_snaps_contains(snaps2, num_snaps2, snaps1[i]))
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Find a redaction bookmark on the dataset at "path" whose list of redaction
* snapshots matches the send we're resuming; fail if no such bookmark exists
* or if the bookmark is incomplete. On success, *bookname is set to the
* bookmark's name.
*
* Note that the caller needs to free *bookname with free() if this function
* returns successfully.
*/
static int
find_redact_book(libzfs_handle_t *hdl, const char *path,
const uint64_t *redact_snap_guids, int num_redact_snaps,
char **bookname)
{
char errbuf[1024];
int error = 0;
nvlist_t *props = fnvlist_alloc();
nvlist_t *bmarks;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
fnvlist_add_boolean(props, "redact_complete");
fnvlist_add_boolean(props, zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
error = lzc_get_bookmarks(path, props, &bmarks);
fnvlist_free(props);
if (error != 0) {
if (error == ESRCH) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"nonexistent redaction bookmark provided"));
} else if (error == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset to be sent no longer exists"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unknown error: %s"), strerror(error));
}
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
nvpair_t *pair;
for (pair = nvlist_next_nvpair(bmarks, NULL); pair;
pair = nvlist_next_nvpair(bmarks, pair)) {
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
uint_t len = 0;
uint64_t *bmarksnaps = fnvlist_lookup_uint64_array(vallist,
ZPROP_VALUE, &len);
if (redact_snaps_equal(redact_snap_guids,
num_redact_snaps, bmarksnaps, len)) {
break;
}
}
if (pair == NULL) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no appropriate redaction bookmark exists"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
char *name = nvpair_name(pair);
nvlist_t *bmark = fnvpair_value_nvlist(pair);
nvlist_t *vallist = fnvlist_lookup_nvlist(bmark, "redact_complete");
boolean_t complete = fnvlist_lookup_boolean_value(vallist,
ZPROP_VALUE);
if (!complete) {
fnvlist_free(bmarks);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incomplete redaction bookmark provided"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
*bookname = strndup(name, ZFS_MAX_DATASET_NAME_LEN);
ASSERT3P(*bookname, !=, NULL);
fnvlist_free(bmarks);
return (0);
}
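/*
* Resume an interrupted send using the information recorded in the resume
* token nvlist: look up the target snapshot by "toguid" and the incremental
* source by "fromguid", find any matching redaction bookmark, optionally
* print a size estimate, and restart the stream at the recorded
* object/offset via lzc_send_resume_redacted().
*/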
static int
zfs_send_resume_impl(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
nvlist_t *resume_nvl)
{
char errbuf[1024];
char *toname;
char *fromname = NULL;
uint64_t resumeobj, resumeoff, toguid, fromguid, bytes;
zfs_handle_t *zhp;
int error = 0;
char name[ZFS_MAX_DATASET_NAME_LEN];
enum lzc_send_flags lzc_flags = 0;
FILE *fout = (flags->verbosity > 0 && flags->dryrun) ? stdout : stderr;
uint64_t *redact_snap_guids = NULL;
int num_redact_snaps = 0;
char *redact_book = NULL;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
if (flags->verbosity != 0) {
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"resume token contents:\n"));
nvlist_print(fout, resume_nvl);
}
if (nvlist_lookup_string(resume_nvl, "toname", &toname) != 0 ||
nvlist_lookup_uint64(resume_nvl, "object", &resumeobj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &resumeoff) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid", &toguid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"resume token is corrupt"));
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
fromguid = 0;
(void) nvlist_lookup_uint64(resume_nvl, "fromguid", &fromguid);
if (flags->largeblock || nvlist_exists(resume_nvl, "largeblockok"))
lzc_flags |= LZC_SEND_FLAG_LARGE_BLOCK;
if (flags->embed_data || nvlist_exists(resume_nvl, "embedok"))
lzc_flags |= LZC_SEND_FLAG_EMBED_DATA;
if (flags->compress || nvlist_exists(resume_nvl, "compressok"))
lzc_flags |= LZC_SEND_FLAG_COMPRESS;
if (flags->raw || nvlist_exists(resume_nvl, "rawok"))
lzc_flags |= LZC_SEND_FLAG_RAW;
if (flags->saved || nvlist_exists(resume_nvl, "savedok"))
lzc_flags |= LZC_SEND_FLAG_SAVED;
if (flags->saved) {
(void) strcpy(name, toname);
} else {
error = guid_to_name(hdl, toname, toguid, B_FALSE, name);
if (error != 0) {
if (zfs_dataset_exists(hdl, toname, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is no longer the same snapshot "
"used in the initial send"), toname);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' used in the initial send no "
"longer exists"), toname);
}
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
}
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"unable to access '%s'"), name);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
if (nvlist_lookup_uint64_array(resume_nvl, "book_redact_snaps",
&redact_snap_guids, (uint_t *)&num_redact_snaps) != 0) {
num_redact_snaps = -1;
}
if (fromguid != 0) {
if (guid_to_name_redact_snaps(hdl, toname, fromguid, B_TRUE,
redact_snap_guids, num_redact_snaps, name) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source %#llx no longer exists"),
(longlong_t)fromguid);
return (zfs_error(hdl, EZFS_BADPATH, errbuf));
}
fromname = name;
}
redact_snap_guids = NULL;
if (nvlist_lookup_uint64_array(resume_nvl,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS), &redact_snap_guids,
(uint_t *)&num_redact_snaps) == 0) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, toname, sizeof (path));
char *at = strchr(path, '@');
ASSERT3P(at, !=, NULL);
*at = '\0';
if ((error = find_redact_book(hdl, path, redact_snap_guids,
num_redact_snaps, &redact_book)) != 0) {
return (error);
}
}
if (flags->verbosity != 0) {
/*
* Some of these flags may have come from the resume token rather than the
* command line; set them here for size-estimate purposes.
*/
sendflags_t tmpflags = *flags;
if (lzc_flags & LZC_SEND_FLAG_LARGE_BLOCK)
tmpflags.largeblock = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_COMPRESS)
tmpflags.compress = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_EMBED_DATA)
tmpflags.embed_data = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_RAW)
tmpflags.raw = B_TRUE;
if (lzc_flags & LZC_SEND_FLAG_SAVED)
tmpflags.saved = B_TRUE;
error = estimate_size(zhp, fromname, outfd, &tmpflags,
resumeobj, resumeoff, bytes, redact_book, errbuf);
}
if (!flags->dryrun) {
progress_arg_t pa = { 0 };
pthread_t tid;
/*
* If progress reporting is requested, spawn a new thread to
* poll ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = outfd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
error = pthread_create(&tid, NULL,
send_progress_thread, &pa);
if (error != 0) {
if (redact_book != NULL)
free(redact_book);
zfs_close(zhp);
return (error);
}
}
error = lzc_send_resume_redacted(zhp->zfs_name, fromname, outfd,
lzc_flags, resumeobj, resumeoff, redact_book);
if (redact_book != NULL)
free(redact_book);
if (flags->progress) {
void *status = NULL;
(void) pthread_cancel(tid);
(void) pthread_join(tid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"progress thread exited nonzero"));
return (zfs_standard_error(hdl, error, errbuf));
}
}
char errbuf[1024];
char zname[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
/* Save the name before closing the handle; it is needed below. */
(void) strlcpy(zname, zhp->zfs_name, sizeof (zname));
zfs_close(zhp);
switch (error) {
case 0:
return (0);
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"source key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ESRCH:
if (lzc_exists(zname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source could not be found"));
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EXDEV:
case ENOENT:
case EDQUOT:
case EFBIG:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EFAULT:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
} else {
if (redact_book != NULL)
free(redact_book);
}
zfs_close(zhp);
return (error);
}
int
zfs_send_resume(libzfs_handle_t *hdl, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
char errbuf[1024];
nvlist_t *resume_nvl;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot resume send"));
resume_nvl = zfs_send_resume_token_to_nvlist(hdl, resume_token);
if (resume_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
return (zfs_error(hdl, EZFS_FAULT, errbuf));
}
ret = zfs_send_resume_impl(hdl, flags, outfd, resume_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
int
zfs_send_saved(zfs_handle_t *zhp, sendflags_t *flags, int outfd,
const char *resume_token)
{
int ret;
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *saved_nvl = NULL, *resume_nvl = NULL;
uint64_t saved_guid = 0, resume_guid = 0;
uint64_t obj = 0, off = 0, bytes = 0;
char token_buf[ZFS_MAXPROPLEN];
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"saved send failed"));
ret = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf), NULL, NULL, 0, B_TRUE);
if (ret != 0)
goto out;
saved_nvl = zfs_send_resume_token_to_nvlist(hdl, token_buf);
if (saved_nvl == NULL) {
/*
* zfs_error_aux has already been set by
* zfs_send_resume_token_to_nvlist()
*/
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
/*
* If a resume token is provided we use the object and offset
* from that instead of the default, which starts from the
* beginning.
*/
if (resume_token != NULL) {
resume_nvl = zfs_send_resume_token_to_nvlist(hdl,
resume_token);
if (resume_nvl == NULL) {
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(resume_nvl, "object", &obj) != 0 ||
nvlist_lookup_uint64(resume_nvl, "offset", &off) != 0 ||
nvlist_lookup_uint64(resume_nvl, "bytes", &bytes) != 0 ||
nvlist_lookup_uint64(resume_nvl, "toguid",
&resume_guid) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (nvlist_lookup_uint64(saved_nvl, "toguid",
&saved_guid)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset's resume token is corrupt"));
ret = zfs_error(hdl, EZFS_FAULT, errbuf);
goto out;
}
if (resume_guid != saved_guid) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"provided resume token does not match dataset"));
ret = zfs_error(hdl, EZFS_BADBACKUP, errbuf);
goto out;
}
}
(void) nvlist_remove_all(saved_nvl, "object");
fnvlist_add_uint64(saved_nvl, "object", obj);
(void) nvlist_remove_all(saved_nvl, "offset");
fnvlist_add_uint64(saved_nvl, "offset", off);
(void) nvlist_remove_all(saved_nvl, "bytes");
fnvlist_add_uint64(saved_nvl, "bytes", bytes);
(void) nvlist_remove_all(saved_nvl, "toname");
fnvlist_add_string(saved_nvl, "toname", zhp->zfs_name);
ret = zfs_send_resume_impl(hdl, flags, outfd, saved_nvl);
out:
fnvlist_free(saved_nvl);
fnvlist_free(resume_nvl);
return (ret);
}
/*
* This function informs the target system that the recursive send is complete.
* The record is also expected in the case of a send -p.
*/
static int
send_conclusion_record(int fd, zio_cksum_t *zc)
{
dmu_replay_record_t drr = { 0 };
drr.drr_type = DRR_END;
if (zc != NULL)
drr.drr_u.drr_end.drr_checksum = *zc;
if (write(fd, &drr, sizeof (drr)) == -1) {
return (errno);
}
return (0);
}
/*
* This function is responsible for sending the records that contain the
* necessary information for the target system's libzfs to be able to set the
* properties of the filesystem being received, or to be able to prepare for
* a recursive receive.
*
* The "zhp" argument is the handle of the snapshot we are sending
* (the "tosnap"). The "from" argument is the short snapshot name (the part
* after the @) of the incremental source.
*/
static int
send_prelim_records(zfs_handle_t *zhp, const char *from, int fd,
boolean_t gather_props, boolean_t recursive, boolean_t verbose,
boolean_t dryrun, boolean_t raw, boolean_t replicate, boolean_t skipmissing,
boolean_t backup, boolean_t holds, boolean_t props, boolean_t doall,
nvlist_t **fssp, avl_tree_t **fsavlp)
{
int err = 0;
char *packbuf = NULL;
size_t buflen = 0;
zio_cksum_t zc = { {0} };
int featureflags = 0;
/* name of filesystem/volume that contains snapshot we are sending */
char tofs[ZFS_MAX_DATASET_NAME_LEN];
/* short name of snap we are sending */
char *tosnap = "";
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_FILESYSTEM && zfs_prop_get_int(zhp,
ZFS_PROP_VERSION) >= ZPL_VERSION_SA) {
featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
if (holds)
featureflags |= DMU_BACKUP_FEATURE_HOLDS;
(void) strlcpy(tofs, zhp->zfs_name, ZFS_MAX_DATASET_NAME_LEN);
char *at = strchr(tofs, '@');
if (at != NULL) {
*at = '\0';
tosnap = at + 1;
}
if (gather_props) {
nvlist_t *hdrnv = fnvlist_alloc();
nvlist_t *fss = NULL;
if (from != NULL)
fnvlist_add_string(hdrnv, "fromsnap", from);
fnvlist_add_string(hdrnv, "tosnap", tosnap);
if (!recursive)
fnvlist_add_boolean(hdrnv, "not_recursive");
if (raw) {
fnvlist_add_boolean(hdrnv, "raw");
}
if ((err = gather_nvlist(zhp->zfs_hdl, tofs,
from, tosnap, recursive, raw, doall, replicate, skipmissing,
verbose, backup, holds, props, &fss, fsavlp)) != 0) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
/*
* Do not allow the size of the properties list to exceed
* the limit
*/
if ((fnvlist_size(fss) + fnvlist_size(hdrnv)) >
zhp->zfs_hdl->libzfs_max_nvlist) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "warning: cannot send '%s': "
"the size of the list of snapshots and properties "
"is too large to be received successfully.\n"
"Select a smaller number of snapshots to send.\n"),
zhp->zfs_name);
return (zfs_error(zhp->zfs_hdl, EZFS_NOSPC,
errbuf));
}
fnvlist_add_nvlist(hdrnv, "fss", fss);
VERIFY0(nvlist_pack(hdrnv, &packbuf, &buflen, NV_ENCODE_XDR,
0));
if (fssp != NULL) {
*fssp = fss;
} else {
fnvlist_free(fss);
}
fnvlist_free(hdrnv);
}
if (!dryrun) {
dmu_replay_record_t drr = { 0 };
/* write first begin record */
drr.drr_type = DRR_BEGIN;
drr.drr_u.drr_begin.drr_magic = DMU_BACKUP_MAGIC;
DMU_SET_STREAM_HDRTYPE(drr.drr_u.drr_begin.
drr_versioninfo, DMU_COMPOUNDSTREAM);
DMU_SET_FEATUREFLAGS(drr.drr_u.drr_begin.
drr_versioninfo, featureflags);
if (snprintf(drr.drr_u.drr_begin.drr_toname,
sizeof (drr.drr_u.drr_begin.drr_toname), "%s@%s", tofs,
tosnap) >= sizeof (drr.drr_u.drr_begin.drr_toname)) {
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
drr.drr_payloadlen = buflen;
err = dump_record(&drr, packbuf, buflen, &zc, fd);
free(packbuf);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
err = send_conclusion_record(fd, &zc);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(err));
return (zfs_error(zhp->zfs_hdl, EZFS_BADBACKUP,
errbuf));
}
}
return (0);
}
/*
* Generate a send stream. The "zhp" argument is the filesystem/volume
* that contains the snapshot to send. The "fromsnap" argument is the
* short name (the part after the '@') of the snapshot that is the
* incremental source to send from (if non-NULL). The "tosnap" argument
* is the short name of the snapshot to send.
*
* The content of the send stream is the snapshot identified by
* 'tosnap'. Incremental streams are requested in two ways:
* - from the snapshot identified by "fromsnap" (if non-null) or
* - from the origin of the dataset identified by zhp, which must
* be a clone. In this case, "fromsnap" is null and "fromorigin"
* is TRUE.
*
* The send stream is recursive (i.e. dumps a hierarchy of snapshots) and
* uses a special header (with a hdrtype field of DMU_COMPOUNDSTREAM)
* if "replicate" is set. If "doall" is set, dump all the intermediate
* snapshots. The DMU_COMPOUNDSTREAM header is used in the "doall"
* case too. If "props" is set, send properties.
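*
* For example (illustrative): an incremental replication stream from
* @snapA to @snapB of the dataset referenced by "zhp" would pass
* fromsnap="snapA" and tosnap="snapB" with flags->replicate set.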
*/
int
zfs_send(zfs_handle_t *zhp, const char *fromsnap, const char *tosnap,
sendflags_t *flags, int outfd, snapfilter_cb_t filter_func,
void *cb_arg, nvlist_t **debugnvp)
{
char errbuf[1024];
send_dump_data_t sdd = { 0 };
int err = 0;
nvlist_t *fss = NULL;
avl_tree_t *fsavl = NULL;
static uint64_t holdseq;
int spa_version;
FILE *fout;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot send '%s'"), zhp->zfs_name);
if (fromsnap && fromsnap[0] == '\0') {
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"zero-length incremental source"));
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
}
if (flags->replicate || flags->doall || flags->props ||
flags->holds || flags->backup) {
char full_tosnap_name[ZFS_MAX_DATASET_NAME_LEN];
if (snprintf(full_tosnap_name, sizeof (full_tosnap_name),
"%s@%s", zhp->zfs_name, tosnap) >=
sizeof (full_tosnap_name)) {
err = EINVAL;
goto stderr_out;
}
zfs_handle_t *tosnap = zfs_open(zhp->zfs_hdl,
full_tosnap_name, ZFS_TYPE_SNAPSHOT);
if (tosnap == NULL) {
err = -1;
goto err_out;
}
err = send_prelim_records(tosnap, fromsnap, outfd,
flags->replicate || flags->props || flags->holds,
flags->replicate, flags->verbosity > 0, flags->dryrun,
flags->raw, flags->replicate, flags->skipmissing,
flags->backup, flags->holds, flags->props, flags->doall,
&fss, &fsavl);
zfs_close(tosnap);
if (err != 0)
goto err_out;
}
/* dump each stream */
sdd.fromsnap = fromsnap;
sdd.tosnap = tosnap;
sdd.outfd = outfd;
sdd.replicate = flags->replicate;
sdd.doall = flags->doall;
sdd.fromorigin = flags->fromorigin;
sdd.fss = fss;
sdd.fsavl = fsavl;
sdd.verbosity = flags->verbosity;
sdd.parsable = flags->parsable;
sdd.progress = flags->progress;
sdd.dryrun = flags->dryrun;
sdd.large_block = flags->largeblock;
sdd.embed_data = flags->embed_data;
sdd.compress = flags->compress;
sdd.raw = flags->raw;
sdd.holds = flags->holds;
sdd.filter_cb = filter_func;
sdd.filter_cb_arg = cb_arg;
if (debugnvp)
sdd.debugnv = *debugnvp;
if (sdd.verbosity != 0 && sdd.dryrun)
sdd.std_out = B_TRUE;
fout = sdd.std_out ? stdout : stderr;
/*
* Some flags require that we place user holds on the datasets that are
* being sent so they don't get destroyed during the send. We can skip
* this step if the pool is imported read-only since the datasets cannot
* be destroyed.
*/
if (!flags->dryrun && !zpool_get_prop_int(zfs_get_pool_handle(zhp),
ZPOOL_PROP_READONLY, NULL) &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS &&
(flags->doall || flags->replicate)) {
++holdseq;
(void) snprintf(sdd.holdtag, sizeof (sdd.holdtag),
".send-%d-%llu", getpid(), (u_longlong_t)holdseq);
sdd.cleanup_fd = open(ZFS_DEV, O_RDWR | O_CLOEXEC);
if (sdd.cleanup_fd < 0) {
err = errno;
goto stderr_out;
}
sdd.snapholds = fnvlist_alloc();
} else {
sdd.cleanup_fd = -1;
sdd.snapholds = NULL;
}
if (flags->verbosity != 0 || sdd.snapholds != NULL) {
/*
* Do a verbose no-op dry run to get all the verbose output
* or to gather snapshot holds before generating any data,
* then do a non-verbose real run to generate the streams.
*/
sdd.dryrun = B_TRUE;
err = dump_filesystems(zhp, &sdd);
if (err != 0)
goto stderr_out;
if (flags->verbosity != 0) {
if (flags->parsable) {
(void) fprintf(fout, "size\t%llu\n",
(longlong_t)sdd.size);
} else {
char buf[16];
zfs_nicebytes(sdd.size, buf, sizeof (buf));
(void) fprintf(fout, dgettext(TEXT_DOMAIN,
"total estimated size is %s\n"), buf);
}
}
/* Treat the case of no snapshots found as an error. */
if (!sdd.seento) {
err = ENOENT;
goto err_out;
}
/* Skip the second run if dryrun was requested. */
if (flags->dryrun)
goto err_out;
if (sdd.snapholds != NULL) {
err = zfs_hold_nvl(zhp, sdd.cleanup_fd, sdd.snapholds);
if (err != 0)
goto stderr_out;
fnvlist_free(sdd.snapholds);
sdd.snapholds = NULL;
}
sdd.dryrun = B_FALSE;
sdd.verbosity = 0;
}
err = dump_filesystems(zhp, &sdd);
fsavl_destroy(fsavl);
fnvlist_free(fss);
/* Treat the case of no snapshots found as an error. */
if (err == 0 && !sdd.seento)
err = ENOENT;
if (sdd.cleanup_fd != -1) {
VERIFY(0 == close(sdd.cleanup_fd));
sdd.cleanup_fd = -1;
}
if (!flags->dryrun && (flags->replicate || flags->doall ||
flags->props || flags->backup || flags->holds)) {
/*
* write final end record. NB: want to do this even if
* there was some error, because it might not be totally
* failed.
*/
err = send_conclusion_record(outfd, NULL);
if (err != 0)
return (zfs_standard_error(zhp->zfs_hdl, err, errbuf));
}
return (err || sdd.err);
stderr_out:
err = zfs_standard_error(zhp->zfs_hdl, err, errbuf);
err_out:
fsavl_destroy(fsavl);
fnvlist_free(fss);
fnvlist_free(sdd.snapholds);
if (sdd.cleanup_fd != -1)
VERIFY(0 == close(sdd.cleanup_fd));
return (err);
}
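/*
* Return a handle for the filesystem or volume that contains the given
* snapshot (i.e. the name with any "@snap" suffix removed).
*/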
static zfs_handle_t *
name_to_dir_handle(libzfs_handle_t *hdl, const char *snapname)
{
char dirname[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(dirname, snapname, ZFS_MAX_DATASET_NAME_LEN);
char *c = strchr(dirname, '@');
if (c != NULL)
*c = '\0';
return (zfs_open(hdl, dirname, ZFS_TYPE_DATASET));
}
/*
* Returns B_TRUE if earlier is an earlier snapshot in later's timeline; either
* an earlier snapshot in the same filesystem, or a snapshot before later's
* origin, or its origin's origin, etc.
*/
static boolean_t
snapshot_is_before(zfs_handle_t *earlier, zfs_handle_t *later)
{
boolean_t ret;
uint64_t later_txg =
(later->zfs_type == ZFS_TYPE_FILESYSTEM ||
later->zfs_type == ZFS_TYPE_VOLUME ?
UINT64_MAX : zfs_prop_get_int(later, ZFS_PROP_CREATETXG));
uint64_t earlier_txg = zfs_prop_get_int(earlier, ZFS_PROP_CREATETXG);
if (earlier_txg >= later_txg)
return (B_FALSE);
zfs_handle_t *earlier_dir = name_to_dir_handle(earlier->zfs_hdl,
earlier->zfs_name);
zfs_handle_t *later_dir = name_to_dir_handle(later->zfs_hdl,
later->zfs_name);
if (strcmp(earlier_dir->zfs_name, later_dir->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_TRUE);
}
char clonename[ZFS_MAX_DATASET_NAME_LEN];
if (zfs_prop_get(later_dir, ZFS_PROP_ORIGIN, clonename,
ZFS_MAX_DATASET_NAME_LEN, NULL, NULL, 0, B_TRUE) != 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
return (B_FALSE);
}
zfs_handle_t *origin = zfs_open(earlier->zfs_hdl, clonename,
ZFS_TYPE_DATASET);
uint64_t origin_txg = zfs_prop_get_int(origin, ZFS_PROP_CREATETXG);
/*
* If "earlier" is exactly the origin, then
* snapshot_is_before(earlier, origin) will return false (because
* they're the same).
*/
if (origin_txg == earlier_txg &&
strcmp(origin->zfs_name, earlier->zfs_name) == 0) {
zfs_close(earlier_dir);
zfs_close(later_dir);
zfs_close(origin);
return (B_TRUE);
}
zfs_close(earlier_dir);
zfs_close(later_dir);
ret = snapshot_is_before(earlier, origin);
zfs_close(origin);
return (ret);
}
/*
* The "zhp" argument is the handle of the dataset to send (typically a
* snapshot). The "from" argument is the full name of the snapshot or
* bookmark that is the incremental source.
*/
int
zfs_send_one(zfs_handle_t *zhp, const char *from, int fd, sendflags_t *flags,
const char *redactbook)
{
int err;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *name = zhp->zfs_name;
pthread_t ptid;
progress_arg_t pa = { 0 };
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"warning: cannot send '%s'"), name);
if (from != NULL && strchr(from, '@')) {
zfs_handle_t *from_zhp = zfs_open(hdl, from,
ZFS_TYPE_DATASET);
if (from_zhp == NULL)
return (-1);
if (!snapshot_is_before(from_zhp, zhp)) {
zfs_close(from_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
zfs_close(from_zhp);
}
if (redactbook != NULL) {
char bookname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *redact_snaps;
zfs_handle_t *book_zhp;
char *at, *pound;
int dsnamelen;
pound = strchr(redactbook, '#');
if (pound != NULL)
redactbook = pound + 1;
at = strchr(name, '@');
if (at == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot do a redacted send to a filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
dsnamelen = at - name;
if (snprintf(bookname, sizeof (bookname), "%.*s#%s",
dsnamelen, name, redactbook)
>= sizeof (bookname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid bookmark name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
book_zhp = zfs_open(hdl, bookname, ZFS_TYPE_BOOKMARK);
if (book_zhp == NULL)
return (-1);
if (nvlist_lookup_nvlist(book_zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS),
&redact_snaps) != 0 || redact_snaps == NULL) {
zfs_close(book_zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a redaction bookmark"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
zfs_close(book_zhp);
}
/*
* Send fs properties
*/
if (flags->props || flags->holds || flags->backup) {
/*
* Note: the header generated by send_prelim_records()
* assumes that the incremental source is in the same
* filesystem/volume as the target (which is a requirement
* when doing "zfs send -R"). But that isn't always the
* case here (e.g. send from snap in origin, or send from
* bookmark). We pass from=NULL, which will omit this
* information from the prelim records; it isn't used
* when receiving this type of stream.
*/
err = send_prelim_records(zhp, NULL, fd, B_TRUE, B_FALSE,
flags->verbosity > 0, flags->dryrun, flags->raw,
flags->replicate, B_FALSE, flags->backup, flags->holds,
flags->props, flags->doall, NULL, NULL);
if (err != 0)
return (err);
}
/*
* Perform size estimate if verbose was specified.
*/
if (flags->verbosity != 0) {
err = estimate_size(zhp, from, fd, flags, 0, 0, 0, redactbook,
errbuf);
if (err != 0)
return (err);
}
if (flags->dryrun)
return (0);
/*
* If progress reporting is requested, spawn a new thread to poll
* ZFS_IOC_SEND_PROGRESS at a regular interval.
*/
if (flags->progress) {
pa.pa_zhp = zhp;
pa.pa_fd = fd;
pa.pa_parsable = flags->parsable;
pa.pa_estimate = B_FALSE;
pa.pa_verbosity = flags->verbosity;
err = pthread_create(&ptid, NULL,
send_progress_thread, &pa);
if (err != 0) {
zfs_error_aux(zhp->zfs_hdl, "%s", strerror(errno));
return (zfs_error(zhp->zfs_hdl,
EZFS_THREADCREATEFAILED, errbuf));
}
}
err = lzc_send_redacted(name, from, fd,
lzc_flags_from_sendflags(flags), redactbook);
if (flags->progress) {
void *status = NULL;
if (err != 0)
(void) pthread_cancel(ptid);
(void) pthread_join(ptid, &status);
int error = (int)(uintptr_t)status;
if (error != 0 && status != PTHREAD_CANCELED)
return (zfs_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN,
"progress thread exited nonzero")));
}
if (flags->props || flags->holds || flags->backup) {
/* Write the final end record. */
err = send_conclusion_record(fd, NULL);
if (err != 0)
return (zfs_standard_error(hdl, err, errbuf));
}
if (err != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not an earlier snapshot from the same fs"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
case ENOENT:
case ESRCH:
if (lzc_exists(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental source (%s) does not exist"),
from);
}
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset key must be loaded"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"target is busy; if a filesystem, "
"it must not be mounted"));
return (zfs_error(hdl, EZFS_BUSY, errbuf));
case EDQUOT:
case EFAULT:
case EFBIG:
case EINVAL:
case EIO:
case ENOLINK:
case ENOSPC:
case ENOSTR:
case ENXIO:
case EPIPE:
case ERANGE:
case EROFS:
zfs_error_aux(hdl, "%s", strerror(errno));
return (zfs_error(hdl, EZFS_BADBACKUP, errbuf));
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (err != 0);
}
/*
* Routines specific to "zfs recv"
*/
static int
recv_read(libzfs_handle_t *hdl, int fd, void *buf, int ilen,
boolean_t byteswap, zio_cksum_t *zc)
{
char *cp = buf;
int rv;
int len = ilen;
do {
rv = read(fd, cp, len);
cp += rv;
len -= rv;
} while (rv > 0);
if (rv < 0 || len != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to read from stream"));
return (zfs_error(hdl, EZFS_BADSTREAM, dgettext(TEXT_DOMAIN,
"cannot receive")));
}
if (zc) {
if (byteswap)
fletcher_4_incremental_byteswap(buf, ilen, zc);
else
fletcher_4_incremental_native(buf, ilen, zc);
}
return (0);
}
static int
recv_read_nvlist(libzfs_handle_t *hdl, int fd, int len, nvlist_t **nvp,
boolean_t byteswap, zio_cksum_t *zc)
{
char *buf;
int err;
buf = zfs_alloc(hdl, len);
if (buf == NULL)
return (ENOMEM);
if (len > hdl->libzfs_max_nvlist) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "nvlist too large"));
free(buf);
return (ENOMEM);
}
err = recv_read(hdl, fd, buf, len, byteswap, zc);
if (err != 0) {
free(buf);
return (err);
}
err = nvlist_unpack(buf, len, nvp, 0);
free(buf);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (malformed nvlist)"));
return (EINVAL);
}
return (0);
}
/*
* Returns the grand origin (origin of origin of origin...) of a given handle.
* If this dataset is not a clone, it simply returns a copy of the original
* handle.
*/
static zfs_handle_t *
recv_open_grand_origin(zfs_handle_t *zhp)
{
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
zfs_handle_t *ozhp = zfs_handle_dup(zhp);
while (ozhp != NULL) {
if (zfs_prop_get(ozhp, ZFS_PROP_ORIGIN, origin,
sizeof (origin), &src, NULL, 0, B_FALSE) != 0)
break;
(void) zfs_close(ozhp);
ozhp = zfs_open(zhp->zfs_hdl, origin, ZFS_TYPE_FILESYSTEM);
}
return (ozhp);
}
static int
recv_rename_impl(zfs_handle_t *zhp, const char *name, const char *newname)
{
int err;
zfs_handle_t *ozhp = NULL;
/*
* Attempt to rename the dataset. If it fails with EACCES we have
* attempted to rename the dataset outside of its encryption root.
* Force the dataset to become an encryption root and try again.
*/
err = lzc_rename(name, newname);
if (err == EACCES) {
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = ENOENT;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = lzc_rename(name, newname);
}
out:
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
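/*
* Rename a dataset that is in the way of the receive: first try the
* caller-provided name and, failing that, fall back to a temporary
* "recv-<pid>-<seq>" name, returning EAGAIN so a later pass can finish the
* job. A changelist is used to unmount and remount anything affected.
*/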
static int
recv_rename(libzfs_handle_t *hdl, const char *name, const char *tryname,
int baselen, char *newname, recvflags_t *flags)
{
static int seq;
int err;
prop_changelist_t *clp = NULL;
zfs_handle_t *zhp = NULL;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
if (clp == NULL) {
err = -1;
goto out;
}
err = changelist_prefix(clp);
if (err)
goto out;
if (tryname) {
(void) strcpy(newname, tryname);
if (flags->verbose) {
(void) printf("attempting rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, tryname);
} else {
err = ENOENT;
}
if (err != 0 && strncmp(name + baselen, "recv-", 5) != 0) {
seq++;
(void) snprintf(newname, ZFS_MAX_DATASET_NAME_LEN,
"%.*srecv-%u-%u", baselen, name, getpid(), seq);
if (flags->verbose) {
(void) printf("failed - trying rename %s to %s\n",
name, newname);
}
err = recv_rename_impl(zhp, name, newname);
if (err == 0)
changelist_rename(clp, name, newname);
if (err && flags->verbose) {
(void) printf("failed (%u) - "
"will try again on next pass\n", errno);
}
err = EAGAIN;
} else if (flags->verbose) {
if (err == 0)
(void) printf("success\n");
else
(void) printf("failed (%u)\n", errno);
}
(void) changelist_postfix(clp);
out:
if (clp != NULL)
changelist_free(clp);
if (zhp != NULL)
zfs_close(zhp);
return (err);
}
static int
recv_promote(libzfs_handle_t *hdl, const char *fsname,
const char *origin_fsname, recvflags_t *flags)
{
int err;
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = NULL, *ozhp = NULL;
if (flags->verbose)
(void) printf("promoting %s\n", fsname);
(void) strlcpy(zc.zc_value, origin_fsname, sizeof (zc.zc_value));
(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
/*
* Attempt to promote the dataset. If it fails with EACCES the
* promotion would cause this dataset to leave its encryption root.
* Force the origin to become an encryption root and try again.
*/
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
if (err == EACCES) {
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = -1;
goto out;
}
ozhp = recv_open_grand_origin(zhp);
if (ozhp == NULL) {
err = -1;
goto out;
}
err = lzc_change_key(ozhp->zfs_name, DCP_CMD_FORCE_NEW_KEY,
NULL, NULL, 0);
if (err != 0)
goto out;
err = zfs_ioctl(hdl, ZFS_IOC_PROMOTE, &zc);
}
out:
if (zhp != NULL)
zfs_close(zhp);
if (ozhp != NULL)
zfs_close(ozhp);
return (err);
}
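/*
* Destroy a dataset that is in the way of the receive (snapshots are
* destroyed with deferred destroy when the pool supports it). If the
* destroy fails, or a deferred destroy leaves the snapshot visible, fall
* back to renaming it out of the way with recv_rename().
*/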
static int
recv_destroy(libzfs_handle_t *hdl, const char *name, int baselen,
char *newname, recvflags_t *flags)
{
int err = 0;
prop_changelist_t *clp;
zfs_handle_t *zhp;
boolean_t defer = B_FALSE;
int spa_version;
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL)
return (-1);
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->force ? MS_FORCE : 0);
/* Remember the type before closing the handle; it is needed below. */
zfs_type_t type = zfs_get_type(zhp);
if (type == ZFS_TYPE_SNAPSHOT &&
zfs_spa_version(zhp, &spa_version) == 0 &&
spa_version >= SPA_VERSION_USERREFS)
defer = B_TRUE;
zfs_close(zhp);
if (clp == NULL)
return (-1);
err = changelist_prefix(clp);
if (err)
return (err);
if (flags->verbose)
(void) printf("attempting destroy %s\n", name);
if (type == ZFS_TYPE_SNAPSHOT) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, name);
err = lzc_destroy_snaps(nv, defer, NULL);
fnvlist_free(nv);
} else {
err = lzc_destroy(name);
}
if (err == 0) {
if (flags->verbose)
(void) printf("success\n");
changelist_remove(clp, name);
}
(void) changelist_postfix(clp);
changelist_free(clp);
/*
* Deferred destroy might destroy the snapshot or only mark it to be
* destroyed later, and it returns success in either case.
*/
if (err != 0 || (defer && zfs_dataset_exists(hdl, name,
ZFS_TYPE_SNAPSHOT))) {
err = recv_rename(hdl, name, NULL, baselen, newname, flags);
}
return (err);
}
typedef struct guid_to_name_data {
uint64_t guid;
boolean_t bookmark_ok;
char *name;
char *skip;
uint64_t *redact_snap_guids;
uint64_t num_redact_snaps;
} guid_to_name_data_t;
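/*
* Returns B_TRUE if the bookmark's list of redaction snapshot guids matches
* the guids we are searching for, irrespective of ordering.
*/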
static boolean_t
redact_snaps_match(zfs_handle_t *zhp, guid_to_name_data_t *gtnd)
{
uint64_t *bmark_snaps;
uint_t bmark_num_snaps;
nvlist_t *nvl;
if (zhp->zfs_type != ZFS_TYPE_BOOKMARK)
return (B_FALSE);
nvl = fnvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_REDACT_SNAPS));
bmark_snaps = fnvlist_lookup_uint64_array(nvl, ZPROP_VALUE,
&bmark_num_snaps);
if (bmark_num_snaps != gtnd->num_redact_snaps)
return (B_FALSE);
int i = 0;
for (; i < bmark_num_snaps; i++) {
int j = 0;
for (; j < bmark_num_snaps; j++) {
if (bmark_snaps[i] == gtnd->redact_snap_guids[j])
break;
}
if (j == bmark_num_snaps)
break;
}
return (i == bmark_num_snaps);
}
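/*
* Iteration callback for guid_to_name_redact_snaps(): returns EEXIST (to
* stop further iteration) once a dataset or bookmark with the requested
* guid is found, recursing into children and, when bookmark_ok is set,
* bookmarks.
*/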
static int
guid_to_name_cb(zfs_handle_t *zhp, void *arg)
{
guid_to_name_data_t *gtnd = arg;
const char *slash;
int err;
if (gtnd->skip != NULL &&
(slash = strrchr(zhp->zfs_name, '/')) != NULL &&
strcmp(slash + 1, gtnd->skip) == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_GUID) == gtnd->guid &&
(gtnd->num_redact_snaps == -1 || redact_snaps_match(zhp, gtnd))) {
(void) strcpy(gtnd->name, zhp->zfs_name);
zfs_close(zhp);
return (EEXIST);
}
err = zfs_iter_children(zhp, guid_to_name_cb, gtnd);
if (err != EEXIST && gtnd->bookmark_ok)
err = zfs_iter_bookmarks(zhp, guid_to_name_cb, gtnd);
zfs_close(zhp);
return (err);
}
/*
* Attempt to find the local dataset associated with this guid. In the case of
* multiple matches, we attempt to find the "best" match by searching
* progressively larger portions of the hierarchy. This allows one to send a
* tree of datasets individually and guarantee that we will find the source
* guid within that hierarchy, even if there are multiple matches elsewhere.
*
* If num_redact_snaps is not -1, we attempt to find a redaction bookmark with
* the specified number of redaction snapshots. If num_redact_snaps isn't 0 or
* -1, then redact_snap_guids will be an array of the guids of the snapshots the
* redaction bookmark was created with. If num_redact_snaps is -1, then we will
* attempt to find a snapshot or bookmark (if bookmark_ok is passed) with the
* given guid. Note that a redaction bookmark can be returned if
* num_redact_snaps == -1.
*/
static int
guid_to_name_redact_snaps(libzfs_handle_t *hdl, const char *parent,
uint64_t guid, boolean_t bookmark_ok, uint64_t *redact_snap_guids,
uint64_t num_redact_snaps, char *name)
{
char pname[ZFS_MAX_DATASET_NAME_LEN];
guid_to_name_data_t gtnd;
gtnd.guid = guid;
gtnd.bookmark_ok = bookmark_ok;
gtnd.name = name;
gtnd.skip = NULL;
gtnd.redact_snap_guids = redact_snap_guids;
gtnd.num_redact_snaps = num_redact_snaps;
/*
* Search progressively larger portions of the hierarchy, starting
* with the filesystem specified by 'parent'. This will
* select the "most local" version of the origin snapshot in the case
* that there are multiple matching snapshots in the system.
*/
(void) strlcpy(pname, parent, sizeof (pname));
char *cp = strrchr(pname, '@');
if (cp == NULL)
cp = strchr(pname, '\0');
for (; cp != NULL; cp = strrchr(pname, '/')) {
/* Chop off the last component and open the parent */
*cp = '\0';
zfs_handle_t *zhp = make_dataset_handle(hdl, pname);
if (zhp == NULL)
continue;
int err = guid_to_name_cb(zfs_handle_dup(zhp), &gtnd);
if (err != EEXIST)
err = zfs_iter_children(zhp, guid_to_name_cb, &gtnd);
if (err != EEXIST && bookmark_ok)
err = zfs_iter_bookmarks(zhp, guid_to_name_cb, &gtnd);
zfs_close(zhp);
if (err == EEXIST)
return (0);
/*
* Remember the last portion of the dataset so we skip it next
* time through (as we've already searched that portion of the
* hierarchy).
*/
gtnd.skip = strrchr(pname, '/') + 1;
}
return (ENOENT);
}
static int
guid_to_name(libzfs_handle_t *hdl, const char *parent, uint64_t guid,
boolean_t bookmark_ok, char *name)
{
return (guid_to_name_redact_snaps(hdl, parent, guid, bookmark_ok, NULL,
-1, name));
}
/*
* Return -1 if guid1's snapshot was created before guid2's, +1 if it was
* created after, and 0 if they were created in the same txg. A missing
* guid2 compares as 0; a missing guid1 compares as +1.
*/
static int
created_before(libzfs_handle_t *hdl, avl_tree_t *avl,
uint64_t guid1, uint64_t guid2)
{
nvlist_t *nvfs;
char *fsname = NULL, *snapname = NULL;
char buf[ZFS_MAX_DATASET_NAME_LEN];
int rv;
zfs_handle_t *guid1hdl, *guid2hdl;
uint64_t create1, create2;
if (guid2 == 0)
return (0);
if (guid1 == 0)
return (1);
nvfs = fsavl_find(avl, guid1, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid1hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid1hdl == NULL)
return (-1);
nvfs = fsavl_find(avl, guid2, &snapname);
fsname = fnvlist_lookup_string(nvfs, "name");
(void) snprintf(buf, sizeof (buf), "%s@%s", fsname, snapname);
guid2hdl = zfs_open(hdl, buf, ZFS_TYPE_SNAPSHOT);
if (guid2hdl == NULL) {
zfs_close(guid1hdl);
return (-1);
}
create1 = zfs_prop_get_int(guid1hdl, ZFS_PROP_CREATETXG);
create2 = zfs_prop_get_int(guid2hdl, ZFS_PROP_CREATETXG);
if (create1 < create2)
rv = -1;
else if (create1 > create2)
rv = +1;
else
rv = 0;
zfs_close(guid1hdl);
zfs_close(guid2hdl);
return (rv);
}
/*
* This function reestablishes the hierarchy of encryption roots after a
* recursive incremental receive has completed. This must be done after the
* second call to recv_incremental_replication() has renamed and promoted all
* sent datasets to their final locations in the dataset hierarchy.
*/
static int
recv_fix_encryption_hierarchy(libzfs_handle_t *hdl, const char *top_zfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl)
{
int err;
nvpair_t *fselem = NULL;
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
while ((fselem = nvlist_next_nvpair(stream_fss, fselem)) != NULL) {
zfs_handle_t *zhp = NULL;
uint64_t crypt;
nvlist_t *snaps, *props, *stream_nvfs = NULL;
nvpair_t *snapel = NULL;
boolean_t is_encroot, is_clone, stream_encroot;
char *cp;
char *stream_keylocation = NULL;
char keylocation[MAXNAMELEN];
char fsname[ZFS_MAX_DATASET_NAME_LEN];
keylocation[0] = '\0';
stream_nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(stream_nvfs, "snaps");
props = fnvlist_lookup_nvlist(stream_nvfs, "props");
stream_encroot = nvlist_exists(stream_nvfs, "is_encroot");
/* find a snapshot from the stream that exists locally */
err = ENOENT;
while ((snapel = nvlist_next_nvpair(snaps, snapel)) != NULL) {
uint64_t guid;
guid = fnvpair_value_uint64(snapel);
err = guid_to_name(hdl, top_zfs, guid, B_FALSE,
fsname);
if (err == 0)
break;
}
if (err != 0)
continue;
cp = strchr(fsname, '@');
if (cp != NULL)
*cp = '\0';
zhp = zfs_open(hdl, fsname, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = ENOENT;
goto error;
}
crypt = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
is_clone = zhp->zfs_dmustats.dds_origin[0] != '\0';
(void) zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
/* we don't need to do anything for unencrypted datasets */
if (crypt == ZIO_CRYPT_OFF) {
zfs_close(zhp);
continue;
}
/*
* If the dataset is flagged as an encryption root, was not
* received as a clone and is not currently an encryption root,
* force it to become one. Fixup the keylocation if necessary.
*/
if (stream_encroot) {
if (!is_clone && !is_encroot) {
err = lzc_change_key(fsname,
DCP_CMD_FORCE_NEW_KEY, NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
stream_keylocation = fnvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
/*
* Refresh the properties in case the call to
* lzc_change_key() changed the value.
*/
zfs_refresh_properties(zhp);
err = zfs_prop_get(zhp, ZFS_PROP_KEYLOCATION,
keylocation, sizeof (keylocation), NULL, NULL,
0, B_TRUE);
if (err != 0) {
zfs_close(zhp);
goto error;
}
if (strcmp(keylocation, stream_keylocation) != 0) {
err = zfs_prop_set(zhp,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION),
stream_keylocation);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
}
/*
* If the dataset is not flagged as an encryption root and is
* currently an encryption root, force it to inherit from its
* parent. The root of a raw send should never be
* force-inherited.
*/
if (!stream_encroot && is_encroot &&
strcmp(top_zfs, fsname) != 0) {
err = lzc_change_key(fsname, DCP_CMD_FORCE_INHERIT,
NULL, NULL, 0);
if (err != 0) {
zfs_close(zhp);
goto error;
}
}
zfs_close(zhp);
}
return (0);
error:
return (err);
}
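/*
* Bring the local dataset hierarchy in line with the replication stream:
* destroy snapshots and filesystems that are no longer in the stream (only
* when flags->force is set), apply received snapshot properties, and rename
* or promote datasets whose names or origins have changed, repeating passes
* until no further progress can be made.
*/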
static int
recv_incremental_replication(libzfs_handle_t *hdl, const char *tofs,
recvflags_t *flags, nvlist_t *stream_nv, avl_tree_t *stream_avl,
nvlist_t *renamed)
{
nvlist_t *local_nv, *deleted = NULL;
avl_tree_t *local_avl;
nvpair_t *fselem, *nextfselem;
char *fromsnap;
char newname[ZFS_MAX_DATASET_NAME_LEN];
char guidname[32];
int error;
boolean_t needagain, progress, recursive;
char *s1, *s2;
fromsnap = fnvlist_lookup_string(stream_nv, "fromsnap");
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
if (flags->dryrun)
return (0);
again:
needagain = progress = B_FALSE;
deleted = fnvlist_alloc();
if ((error = gather_nvlist(hdl, tofs, fromsnap, NULL,
recursive, B_TRUE, B_FALSE, recursive, B_FALSE, B_FALSE, B_FALSE,
B_FALSE, B_TRUE, &local_nv, &local_avl)) != 0)
return (error);
/*
* Process deletes and renames
*/
for (fselem = nvlist_next_nvpair(local_nv, NULL);
fselem; fselem = nextfselem) {
nvlist_t *nvfs, *snaps;
nvlist_t *stream_nvfs = NULL;
nvpair_t *snapelem, *nextsnapelem;
uint64_t fromguid = 0;
uint64_t originguid = 0;
uint64_t stream_originguid = 0;
uint64_t parent_fromsnap_guid, stream_parent_fromsnap_guid;
char *fsname, *stream_fsname;
nextfselem = nvlist_next_nvpair(local_nv, fselem);
nvfs = fnvpair_value_nvlist(fselem);
snaps = fnvlist_lookup_nvlist(nvfs, "snaps");
fsname = fnvlist_lookup_string(nvfs, "name");
parent_fromsnap_guid = fnvlist_lookup_uint64(nvfs,
"parentfromsnap");
(void) nvlist_lookup_uint64(nvfs, "origin", &originguid);
/*
* First find the stream's fs, so we can check for
* a different origin (due to "zfs promote")
*/
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nvlist_next_nvpair(snaps, snapelem)) {
uint64_t thisguid;
thisguid = fnvpair_value_uint64(snapelem);
stream_nvfs = fsavl_find(stream_avl, thisguid, NULL);
if (stream_nvfs != NULL)
break;
}
/* check for promote */
(void) nvlist_lookup_uint64(stream_nvfs, "origin",
&stream_originguid);
if (stream_nvfs && originguid != stream_originguid) {
switch (created_before(hdl, local_avl,
stream_originguid, originguid)) {
case 1: {
/* promote it! */
nvlist_t *origin_nvfs;
char *origin_fsname;
origin_nvfs = fsavl_find(local_avl, originguid,
NULL);
origin_fsname = fnvlist_lookup_string(
origin_nvfs, "name");
error = recv_promote(hdl, fsname, origin_fsname,
flags);
if (error == 0)
progress = B_TRUE;
break;
}
default:
break;
case -1:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
return (-1);
}
/*
* We had/have the wrong origin, therefore our
* list of snapshots is wrong. Need to handle
* them on the next pass.
*/
needagain = B_TRUE;
continue;
}
for (snapelem = nvlist_next_nvpair(snaps, NULL);
snapelem; snapelem = nextsnapelem) {
uint64_t thisguid;
char *stream_snapname;
nvlist_t *found, *props;
nextsnapelem = nvlist_next_nvpair(snaps, snapelem);
thisguid = fnvpair_value_uint64(snapelem);
found = fsavl_find(stream_avl, thisguid,
&stream_snapname);
/* check for delete */
if (found == NULL) {
char name[ZFS_MAX_DATASET_NAME_LEN];
if (!flags->force)
continue;
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
error = recv_destroy(hdl, name,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)thisguid);
nvlist_add_boolean(deleted, guidname);
continue;
}
stream_nvfs = found;
if (0 == nvlist_lookup_nvlist(stream_nvfs, "snapprops",
&props) && 0 == nvlist_lookup_nvlist(props,
stream_snapname, &props)) {
zfs_cmd_t zc = {"\0"};
zc.zc_cookie = B_TRUE; /* received */
(void) snprintf(zc.zc_name, sizeof (zc.zc_name),
"%s@%s", fsname, nvpair_name(snapelem));
if (zcmd_write_src_nvlist(hdl, &zc,
props) == 0) {
(void) zfs_ioctl(hdl,
ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
}
/* check for different snapname */
if (strcmp(nvpair_name(snapelem),
stream_snapname) != 0) {
char name[ZFS_MAX_DATASET_NAME_LEN];
char tryname[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s@%s",
fsname, nvpair_name(snapelem));
(void) snprintf(tryname, sizeof (tryname), "%s@%s",
fsname, stream_snapname);
error = recv_rename(hdl, name, tryname,
strlen(fsname)+1, newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
if (strcmp(stream_snapname, fromsnap) == 0)
fromguid = thisguid;
}
/* check for delete */
if (stream_nvfs == NULL) {
if (!flags->force)
continue;
error = recv_destroy(hdl, fsname, strlen(tofs)+1,
newname, flags);
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
nvlist_add_boolean(deleted, guidname);
continue;
}
if (fromguid == 0) {
if (flags->verbose) {
(void) printf("local fs %s does not have "
"fromsnap (%s in stream); must have "
"been deleted locally; ignoring\n",
fsname, fromsnap);
}
continue;
}
stream_fsname = fnvlist_lookup_string(stream_nvfs, "name");
stream_parent_fromsnap_guid = fnvlist_lookup_uint64(
stream_nvfs, "parentfromsnap");
s1 = strrchr(fsname, '/');
s2 = strrchr(stream_fsname, '/');
/*
* Check whether we're about to rename based on a parent guid change
* and the current parent guid was also deleted. If it was, the
* rename would fail and is likely unneeded, so skip it and force an
* early retry to determine the new parent_fromsnap_guid.
*/
if (stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) {
sprintf(guidname, "%llu",
(u_longlong_t)parent_fromsnap_guid);
if (nvlist_exists(deleted, guidname)) {
progress = B_TRUE;
needagain = B_TRUE;
goto doagain;
}
}
/*
* Check for rename. If the exact receive path is specified, it
* does not count as a rename, but we still need to check the
* datasets beneath it.
*/
if ((stream_parent_fromsnap_guid != 0 &&
parent_fromsnap_guid != 0 &&
stream_parent_fromsnap_guid != parent_fromsnap_guid) ||
((flags->isprefix || strcmp(tofs, fsname) != 0) &&
(s1 != NULL) && (s2 != NULL) && strcmp(s1, s2) != 0)) {
nvlist_t *parent;
char tryname[ZFS_MAX_DATASET_NAME_LEN];
parent = fsavl_find(local_avl,
stream_parent_fromsnap_guid, NULL);
/*
* NB: parent might not be found if we used the
* tosnap for stream_parent_fromsnap_guid,
* because the parent is a newly-created fs;
* we'll be able to rename it after we recv the
* new fs.
*/
if (parent != NULL) {
char *pname;
pname = fnvlist_lookup_string(parent, "name");
(void) snprintf(tryname, sizeof (tryname),
"%s%s", pname, strrchr(stream_fsname, '/'));
} else {
tryname[0] = '\0';
if (flags->verbose) {
(void) printf("local fs %s new parent "
"not found\n", fsname);
}
}
newname[0] = '\0';
error = recv_rename(hdl, fsname, tryname,
strlen(tofs)+1, newname, flags);
if (renamed != NULL && newname[0] != '\0') {
fnvlist_add_boolean(renamed, newname);
}
if (error)
needagain = B_TRUE;
else
progress = B_TRUE;
}
}
doagain:
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
fnvlist_free(deleted);
if (needagain && progress) {
/* do another pass to fix up temporary names */
if (flags->verbose)
(void) printf("another pass:\n");
goto again;
}
return (needagain || error != 0);
}
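The retry logic above remembers which datasets it has already destroyed by stringifying each guid and adding it as a boolean flag to the scratch "deleted" nvlist, then probing that nvlist before attempting a rename that would land on a now-missing parent. Below is a minimal, self-contained sketch of that bookkeeping pattern, assuming only the stock libnvpair fnvlist_* helpers; the guid value is made up for illustration and is not taken from this change.

#include <stdio.h>
#include <stdint.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *deleted = fnvlist_alloc();
	char guidname[32];
	uint64_t guid = 123456789ULL;	/* hypothetical snapshot guid */

	/* record that the dataset owning this guid was destroyed */
	(void) snprintf(guidname, sizeof (guidname), "%llu",
	    (unsigned long long)guid);
	fnvlist_add_boolean(deleted, guidname);

	/* later: force a retry instead of renaming onto a deleted parent */
	if (nvlist_exists(deleted, guidname))
		(void) printf("guid %s already deleted; retry later\n",
		    guidname);

	fnvlist_free(deleted);
	return (0);
}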
static int
zfs_receive_package(libzfs_handle_t *hdl, int fd, const char *destname,
recvflags_t *flags, dmu_replay_record_t *drr, zio_cksum_t *zc,
char **top_zfs, nvlist_t *cmdprops)
{
nvlist_t *stream_nv = NULL;
avl_tree_t *stream_avl = NULL;
char *fromsnap = NULL;
char *sendsnap = NULL;
char *cp;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
char sendfs[ZFS_MAX_DATASET_NAME_LEN];
char errbuf[1024];
dmu_replay_record_t drre;
int error;
boolean_t anyerr = B_FALSE;
boolean_t softerr = B_FALSE;
boolean_t recursive, raw;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
assert(drr->drr_type == DRR_BEGIN);
assert(drr->drr_u.drr_begin.drr_magic == DMU_BACKUP_MAGIC);
assert(DMU_GET_STREAM_HDRTYPE(drr->drr_u.drr_begin.drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
/*
* Read in the nvlist from the stream.
*/
if (drr->drr_payloadlen != 0) {
error = recv_read_nvlist(hdl, fd, drr->drr_payloadlen,
&stream_nv, flags->byteswap, zc);
if (error) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
}
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
raw = (nvlist_lookup_boolean(stream_nv, "raw") == 0);
if (recursive && strchr(destname, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot stream"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
/*
* Read in the end record and verify checksum.
*/
if (0 != (error = recv_read(hdl, fd, &drre, sizeof (drre),
flags->byteswap, NULL)))
goto out;
if (flags->byteswap) {
drre.drr_type = BSWAP_32(drre.drr_type);
drre.drr_u.drr_end.drr_checksum.zc_word[0] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[0]);
drre.drr_u.drr_end.drr_checksum.zc_word[1] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[1]);
drre.drr_u.drr_end.drr_checksum.zc_word[2] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[2]);
drre.drr_u.drr_end.drr_checksum.zc_word[3] =
BSWAP_64(drre.drr_u.drr_end.drr_checksum.zc_word[3]);
}
if (drre.drr_type != DRR_END) {
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if (!ZIO_CHECKSUM_EQUAL(drre.drr_u.drr_end.drr_checksum, *zc)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incorrect header checksum"));
error = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
(void) nvlist_lookup_string(stream_nv, "fromsnap", &fromsnap);
if (drr->drr_payloadlen != 0) {
nvlist_t *stream_fss;
stream_fss = fnvlist_lookup_nvlist(stream_nv, "fss");
if ((stream_avl = fsavl_create(stream_fss)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"couldn't allocate avl tree"));
error = zfs_error(hdl, EZFS_NOMEM, errbuf);
goto out;
}
if (fromsnap != NULL && recursive) {
nvlist_t *renamed = NULL;
nvpair_t *pair = NULL;
(void) strlcpy(tofs, destname, sizeof (tofs));
if (flags->isprefix) {
struct drr_begin *drrb = &drr->drr_u.drr_begin;
int i;
if (flags->istail) {
cp = strrchr(drrb->drr_toname, '/');
if (cp == NULL) {
(void) strlcat(tofs, "/",
sizeof (tofs));
i = 0;
} else {
i = (cp - drrb->drr_toname);
}
} else {
i = strcspn(drrb->drr_toname, "/@");
}
/* zfs_receive_one() will create_parents() */
(void) strlcat(tofs, &drrb->drr_toname[i],
sizeof (tofs));
*strchr(tofs, '@') = '\0';
}
if (!flags->dryrun && !flags->nomount) {
renamed = fnvlist_alloc();
}
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, renamed);
/* Unmount renamed filesystems before receiving. */
while ((pair = nvlist_next_nvpair(renamed,
pair)) != NULL) {
zfs_handle_t *zhp;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, nvpair_name(pair),
ZFS_TYPE_FILESYSTEM);
if (zhp != NULL) {
clp = changelist_gather(zhp,
ZFS_PROP_MOUNTPOINT, 0,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp != NULL) {
softerr |=
changelist_prefix(clp);
changelist_free(clp);
}
}
}
fnvlist_free(renamed);
}
}
/*
* Get the fs specified by the first path in the stream (the top level
* specified by 'zfs send') and pass it to each invocation of
* zfs_receive_one().
*/
(void) strlcpy(sendfs, drr->drr_u.drr_begin.drr_toname,
sizeof (sendfs));
if ((cp = strchr(sendfs, '@')) != NULL) {
*cp = '\0';
/*
* Find the "sendsnap", the final snapshot in a replication
* stream. zfs_receive_one() handles certain errors
* differently, depending on whether the contained stream
* is the last one or not.
*/
sendsnap = (cp + 1);
}
/* Finally, receive each contained stream */
do {
/*
* We should figure out if it has a recoverable
* error, in which case do a recv_skip() and drive on.
* Note, if we fail due to already having this guid,
* zfs_receive_one() will take care of it (i.e.,
* recv_skip() and return 0).
*/
error = zfs_receive_impl(hdl, destname, NULL, flags, fd,
sendfs, stream_nv, stream_avl, top_zfs, sendsnap, cmdprops);
if (error == ENODATA) {
error = 0;
break;
}
anyerr |= error;
} while (error == 0);
if (drr->drr_payloadlen != 0 && recursive && fromsnap != NULL) {
/*
* Now that we have the filesystems they sent us, try
* the renames again.
*/
softerr = recv_incremental_replication(hdl, tofs, flags,
stream_nv, stream_avl, NULL);
}
if (raw && softerr == 0 && *top_zfs != NULL) {
softerr = recv_fix_encryption_hierarchy(hdl, *top_zfs,
stream_nv, stream_avl);
}
out:
fsavl_destroy(stream_avl);
fnvlist_free(stream_nv);
if (softerr)
error = -2;
if (anyerr)
error = -1;
return (error);
}
static void
trunc_prop_errs(int truncated)
{
ASSERT(truncated != 0);
if (truncated == 1)
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"1 more property could not be set\n"));
else
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"%d more properties could not be set\n"), truncated);
}
static int
recv_skip(libzfs_handle_t *hdl, int fd, boolean_t byteswap)
{
dmu_replay_record_t *drr;
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
uint64_t payload_size;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* XXX would be great to use lseek if possible... */
drr = buf;
while (recv_read(hdl, fd, drr, sizeof (dmu_replay_record_t),
byteswap, NULL) == 0) {
if (byteswap)
drr->drr_type = BSWAP_32(drr->drr_type);
switch (drr->drr_type) {
case DRR_BEGIN:
if (drr->drr_payloadlen != 0) {
(void) recv_read(hdl, fd, buf,
drr->drr_payloadlen, B_FALSE, NULL);
}
break;
case DRR_END:
free(buf);
return (0);
case DRR_OBJECT:
if (byteswap) {
drr->drr_u.drr_object.drr_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_bonuslen);
drr->drr_u.drr_object.drr_raw_bonuslen =
BSWAP_32(drr->drr_u.drr_object.
drr_raw_bonuslen);
}
payload_size =
DRR_OBJECT_PAYLOAD_SIZE(&drr->drr_u.drr_object);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE:
if (byteswap) {
drr->drr_u.drr_write.drr_logical_size =
BSWAP_64(
drr->drr_u.drr_write.drr_logical_size);
drr->drr_u.drr_write.drr_compressed_size =
BSWAP_64(
drr->drr_u.drr_write.drr_compressed_size);
}
payload_size =
DRR_WRITE_PAYLOAD_SIZE(&drr->drr_u.drr_write);
assert(payload_size <= SPA_MAXBLOCKSIZE);
(void) recv_read(hdl, fd, buf,
payload_size, B_FALSE, NULL);
break;
case DRR_SPILL:
if (byteswap) {
drr->drr_u.drr_spill.drr_length =
BSWAP_64(drr->drr_u.drr_spill.drr_length);
drr->drr_u.drr_spill.drr_compressed_size =
BSWAP_64(drr->drr_u.drr_spill.
drr_compressed_size);
}
payload_size =
DRR_SPILL_PAYLOAD_SIZE(&drr->drr_u.drr_spill);
(void) recv_read(hdl, fd, buf, payload_size,
B_FALSE, NULL);
break;
case DRR_WRITE_EMBEDDED:
if (byteswap) {
drr->drr_u.drr_write_embedded.drr_psize =
BSWAP_32(drr->drr_u.drr_write_embedded.
drr_psize);
}
(void) recv_read(hdl, fd, buf,
P2ROUNDUP(drr->drr_u.drr_write_embedded.drr_psize,
8), B_FALSE, NULL);
break;
case DRR_OBJECT_RANGE:
case DRR_WRITE_BYREF:
case DRR_FREEOBJECTS:
case DRR_FREE:
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type"));
free(buf);
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
}
free(buf);
return (-1);
}
static void
recv_ecksum_set_aux(libzfs_handle_t *hdl, const char *target_snap,
boolean_t resumable, boolean_t checksum)
{
char target_fs[ZFS_MAX_DATASET_NAME_LEN];
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, (checksum ?
"checksum mismatch" : "incomplete stream")));
if (!resumable)
return;
(void) strlcpy(target_fs, target_snap, sizeof (target_fs));
*strchr(target_fs, '@') = '\0';
zfs_handle_t *zhp = zfs_open(hdl, target_fs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return;
char token_buf[ZFS_MAXPROPLEN];
int error = zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
token_buf, sizeof (token_buf),
NULL, NULL, 0, B_TRUE);
if (error == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"checksum mismatch or incomplete stream.\n"
"Partially received snapshot is saved.\n"
"A resuming stream can be generated on the sending "
"system by running:\n"
" zfs send -t %s"),
token_buf);
}
zfs_close(zhp);
}
/*
* Prepare a new nvlist of properties that are to override (-o) or be excluded
* (-x) from the received dataset
* recvprops: received properties from the send stream
* cmdprops: raw input properties from command line
* origprops: properties, both locally-set and received, currently set on the
* target dataset if it exists, NULL otherwise.
* oxprops: valid output override (-o) and excluded (-x) properties
*/
static int
zfs_setup_cmdline_props(libzfs_handle_t *hdl, zfs_type_t type,
char *fsname, boolean_t zoned, boolean_t recursive, boolean_t newfs,
boolean_t raw, boolean_t toplevel, nvlist_t *recvprops, nvlist_t *cmdprops,
nvlist_t *origprops, nvlist_t **oxprops, uint8_t **wkeydata_out,
uint_t *wkeylen_out, const char *errbuf)
{
nvpair_t *nvp;
nvlist_t *oprops, *voprops;
zfs_handle_t *zhp = NULL;
zpool_handle_t *zpool_hdl = NULL;
char *cp;
int ret = 0;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
if (nvlist_empty(cmdprops))
return (0); /* No properties to override or exclude */
*oxprops = fnvlist_alloc();
oprops = fnvlist_alloc();
strlcpy(namebuf, fsname, ZFS_MAX_DATASET_NAME_LEN);
/*
* Get our dataset handle. The target dataset may not exist yet.
*/
if (zfs_dataset_exists(hdl, namebuf, ZFS_TYPE_DATASET)) {
zhp = zfs_open(hdl, namebuf, ZFS_TYPE_DATASET);
if (zhp == NULL) {
ret = -1;
goto error;
}
}
/* open the zpool handle */
cp = strchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
zpool_hdl = zpool_open(hdl, namebuf);
if (zpool_hdl == NULL) {
ret = -1;
goto error;
}
/* restore namebuf to match fsname for later use */
if (cp != NULL)
*cp = '/';
/*
* first iteration: process excluded (-x) properties now and gather
* added (-o) properties to be later processed by zfs_valid_proplist()
*/
nvp = NULL;
while ((nvp = nvlist_next_nvpair(cmdprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
zfs_prop_t prop = zfs_name_to_prop(name);
/* "origin" is processed separately, don't handle it here */
if (prop == ZFS_PROP_ORIGIN)
continue;
/* raw streams can't override encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set or excluded for raw streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/* incremental streams can only exclude encryption properties */
if ((zfs_prop_encryption_key_param(prop) ||
prop == ZFS_PROP_ENCRYPTION) && !newfs &&
nvpair_type(nvp) != DATA_TYPE_BOOLEAN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption property '%s' cannot "
"be set for incremental streams."), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
switch (nvpair_type(nvp)) {
case DATA_TYPE_BOOLEAN: /* -x property */
/*
* DATA_TYPE_BOOLEAN is the way we're asked to "exclude"
* a property: this is done by forcing an explicit
* inherit on the destination so the effective value is
* not the one we received from the send stream.
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN,
"Warning: %s: property '%s' does not "
"apply to datasets of this type\n"),
fsname, name);
continue;
}
/*
* We do this only if the property is not already
* locally set, in which case its value takes
* priority over the received one anyway.
*/
if (nvlist_exists(origprops, name)) {
nvlist_t *attrs;
char *source = NULL;
attrs = fnvlist_lookup_nvlist(origprops, name);
if (nvlist_lookup_string(attrs,
ZPROP_SOURCE, &source) == 0 &&
strcmp(source, ZPROP_SOURCE_VAL_RECVD) != 0)
continue;
}
/*
* We can't force an explicit inherit on non-inheritable
* properties: if we're asked to exclude such values we
* remove them from the "recvprops" input nvlist instead.
*/
if (!zfs_prop_inheritable(prop) &&
!zfs_prop_user(name) && /* can be inherited too */
nvlist_exists(recvprops, name))
fnvlist_remove(recvprops, name);
else
fnvlist_add_nvpair(*oxprops, nvp);
break;
case DATA_TYPE_STRING: /* -o property=value */
/*
* we're trying to override a property that does not
* make sense for this type of dataset, but we don't
* want to fail if the receive is recursive: this comes
* in handy when the send stream contains, for
* instance, a child ZVOL and we're trying to receive
* it with "-o atime=on"
*/
if (!zfs_prop_valid_for_type(prop, type, B_FALSE) &&
!zfs_prop_user(name)) {
if (recursive)
continue;
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' does not apply to datasets "
"of this type"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
fnvlist_add_nvpair(oprops, nvp);
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be a string or boolean"), name);
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
if (toplevel) {
/* convert override string properties to native */
if ((voprops = zfs_valid_proplist(hdl, ZFS_TYPE_DATASET,
oprops, zoned, zhp, zpool_hdl, B_FALSE, errbuf)) == NULL) {
ret = zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* zfs_crypto_create() requires the parent name. Get it
* by truncating the fsname copy stored in namebuf.
*/
cp = strrchr(namebuf, '/');
if (cp != NULL)
*cp = '\0';
if (!raw && zfs_crypto_create(hdl, namebuf, voprops, NULL,
B_FALSE, wkeydata_out, wkeylen_out) != 0) {
fnvlist_free(voprops);
ret = zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
goto error;
}
/* second pass: process "-o" properties */
fnvlist_merge(*oxprops, voprops);
fnvlist_free(voprops);
} else {
/* override props on child datasets are inherited */
nvp = NULL;
while ((nvp = nvlist_next_nvpair(oprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
fnvlist_add_boolean(*oxprops, name);
}
}
error:
if (zhp != NULL)
zfs_close(zhp);
if (zpool_hdl != NULL)
zpool_close(zpool_hdl);
fnvlist_free(oprops);
return (ret);
}
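For context, the switch above expects command-line properties in a specific nvlist encoding: a "-o property=value" override arrives as a DATA_TYPE_STRING pair, while a "-x property" exclusion arrives as a bare DATA_TYPE_BOOLEAN pair. Here is a rough sketch of building such a cmdprops nvlist with the stock libnvpair helpers; the property names are illustrative only and not part of this change.

#include <stdio.h>
#include <libnvpair.h>

int
main(void)
{
	/* encode something like "zfs receive -o compression=on -x quota" */
	nvlist_t *cmdprops = fnvlist_alloc();

	fnvlist_add_string(cmdprops, "compression", "on");	/* -o */
	fnvlist_add_boolean(cmdprops, "quota");			/* -x */

	nvlist_print(stdout, cmdprops);
	fnvlist_free(cmdprops);
	return (0);
}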
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
*/
static int
zfs_receive_one(libzfs_handle_t *hdl, int infd, const char *tosnap,
const char *originsnap, recvflags_t *flags, dmu_replay_record_t *drr,
dmu_replay_record_t *drr_noswap, const char *sendfs, nvlist_t *stream_nv,
avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
time_t begin_time;
int ioctl_err, ioctl_errno, err;
char *cp;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
char errbuf[1024];
const char *chopprefix;
boolean_t newfs = B_FALSE;
boolean_t stream_wantsnewfs, stream_resumingnewfs;
boolean_t newprops = B_FALSE;
uint64_t read_bytes = 0;
uint64_t errflags = 0;
uint64_t parent_snapguid = 0;
prop_changelist_t *clp = NULL;
nvlist_t *snapprops_nvlist = NULL;
nvlist_t *snapholds_nvlist = NULL;
zprop_errflags_t prop_errflags;
nvlist_t *prop_errors = NULL;
boolean_t recursive;
char *snapname = NULL;
char destsnap[MAXPATHLEN * 2];
char origin[MAXNAMELEN];
char name[MAXPATHLEN];
char tmp_keylocation[MAXNAMELEN];
nvlist_t *rcvprops = NULL; /* props received from the send stream */
nvlist_t *oxprops = NULL; /* override (-o) and exclude (-x) props */
nvlist_t *origprops = NULL; /* original props (if destination exists) */
zfs_type_t type;
boolean_t toplevel = B_FALSE;
boolean_t zoned = B_FALSE;
boolean_t hastoken = B_FALSE;
boolean_t redacted;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
begin_time = time(NULL);
bzero(origin, MAXNAMELEN);
bzero(tmp_keylocation, MAXNAMELEN);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
recursive = (nvlist_lookup_boolean(stream_nv, "not_recursive") ==
ENOENT);
/* Did the user request holds be skipped via zfs recv -k? */
boolean_t holds = flags->holds && !flags->skipholds;
if (stream_avl != NULL) {
char *keylocation = NULL;
nvlist_t *lookup = NULL;
nvlist_t *fs = fsavl_find(stream_avl, drrb->drr_toguid,
&snapname);
(void) nvlist_lookup_uint64(fs, "parentfromsnap",
&parent_snapguid);
err = nvlist_lookup_nvlist(fs, "props", &rcvprops);
if (err) {
rcvprops = fnvlist_alloc();
newprops = B_TRUE;
}
/*
* The keylocation property may only be set on encryption roots,
* but this dataset might not become an encryption root until
* recv_fix_encryption_hierarchy() is called. That function
* will fix up the keylocation anyway, so we temporarily unset
* the keylocation to avoid any errors from the receive
* ioctl.
*/
err = nvlist_lookup_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), &keylocation);
if (err == 0) {
strcpy(tmp_keylocation, keylocation);
(void) nvlist_remove_all(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION));
}
if (flags->canmountoff) {
fnvlist_add_uint64(rcvprops,
zfs_prop_to_name(ZFS_PROP_CANMOUNT), 0);
} else if (newprops) { /* nothing in rcvprops, eliminate it */
fnvlist_free(rcvprops);
rcvprops = NULL;
newprops = B_FALSE;
}
if (0 == nvlist_lookup_nvlist(fs, "snapprops", &lookup)) {
snapprops_nvlist = fnvlist_lookup_nvlist(lookup,
snapname);
}
if (holds) {
if (0 == nvlist_lookup_nvlist(fs, "snapholds",
&lookup)) {
snapholds_nvlist = fnvlist_lookup_nvlist(
lookup, snapname);
}
}
}
cp = NULL;
/*
* Determine how much of the snapshot name stored in the stream
* we are going to tack on to the name they specified on the
* command line, and how much we are going to chop off.
*
* If they specified a snapshot, chop the entire name stored in
* the stream.
*/
if (flags->istail) {
/*
* A filesystem was specified with -e. We want to tack on only
* the tail of the sent snapshot path.
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -e"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strrchr(sendfs, '/');
if (chopprefix == NULL) {
/*
* The tail is the poolname, so we need to
* prepend a path separator.
*/
int len = strlen(drrb->drr_toname);
cp = malloc(len + 2);
cp[0] = '/';
(void) strcpy(&cp[1], drrb->drr_toname);
chopprefix = cp;
} else {
chopprefix = drrb->drr_toname + (chopprefix - sendfs);
}
} else if (flags->isprefix) {
/*
* A filesystem was specified with -d. We want to tack on
* everything but the first element of the sent snapshot path
* (all but the pool name).
*/
if (strchr(tosnap, '@')) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"argument - snapshot not allowed with -d"));
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
chopprefix = strchr(drrb->drr_toname, '/');
if (chopprefix == NULL)
chopprefix = strchr(drrb->drr_toname, '@');
} else if (strchr(tosnap, '@') == NULL) {
/*
* If a filesystem was specified without -d or -e, we want to
* tack on everything after the fs specified by 'zfs send'.
*/
chopprefix = drrb->drr_toname + strlen(sendfs);
} else {
/* A snapshot was specified as an exact path (no -d or -e). */
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot specify snapshot name for multi-snapshot "
"stream"));
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
chopprefix = drrb->drr_toname + strlen(drrb->drr_toname);
}
ASSERT(strstr(drrb->drr_toname, sendfs) == drrb->drr_toname);
ASSERT(chopprefix > drrb->drr_toname || strchr(sendfs, '/') == NULL);
ASSERT(chopprefix <= drrb->drr_toname + strlen(drrb->drr_toname) ||
strchr(sendfs, '/') == NULL);
ASSERT(chopprefix[0] == '/' || chopprefix[0] == '@' ||
chopprefix[0] == '\0');
/*
* Determine name of destination snapshot.
*/
(void) strlcpy(destsnap, tosnap, sizeof (destsnap));
(void) strlcat(destsnap, chopprefix, sizeof (destsnap));
free(cp);
if (!zfs_name_valid(destsnap, ZFS_TYPE_SNAPSHOT)) {
err = zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto out;
}
/*
* Determine the name of the origin snapshot.
*/
if (originsnap) {
(void) strlcpy(origin, originsnap, sizeof (origin));
if (flags->verbose)
(void) printf("using provided clone origin %s\n",
origin);
} else if (drrb->drr_flags & DRR_FLAG_CLONE) {
if (guid_to_name(hdl, destsnap,
drrb->drr_fromguid, B_FALSE, origin) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"local origin for clone %s does not exist"),
destsnap);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
if (flags->verbose)
(void) printf("found clone origin %s\n", origin);
}
if ((DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_DEDUP)) {
(void) fprintf(stderr,
gettext("ERROR: \"zfs receive\" no longer supports "
"deduplicated send streams. Use\n"
"the \"zstream redup\" command to convert this stream "
"to a regular,\n"
"non-deduplicated stream.\n"));
err = zfs_error(hdl, EZFS_NOTSUP, errbuf);
goto out;
}
boolean_t resuming = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RESUMING;
boolean_t raw = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_RAW;
boolean_t embedded = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_EMBED_DATA;
stream_wantsnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && !resuming;
stream_resumingnewfs = (drrb->drr_fromguid == 0 ||
(drrb->drr_flags & DRR_FLAG_CLONE) || originsnap) && resuming;
if (stream_wantsnewfs) {
/*
* if the parent fs does not exist, look for it based on
* the parent snap GUID
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive new filesystem stream"));
(void) strcpy(name, destsnap);
cp = strrchr(name, '/');
if (cp)
*cp = '\0';
if (cp &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char suffix[ZFS_MAX_DATASET_NAME_LEN];
(void) strcpy(suffix, strrchr(destsnap, '/'));
if (guid_to_name(hdl, name, parent_snapguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strcat(destsnap, suffix);
}
}
} else {
/*
* If the fs does not exist, look for it based on the
* fromsnap GUID.
*/
if (resuming) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive resume stream"));
} else {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot receive incremental stream"));
}
(void) strcpy(name, destsnap);
*strchr(name, '@') = '\0';
/*
* If the exact receive path was specified and this is the
* topmost path in the stream, then if the fs does not exist we
* should look no further.
*/
if ((flags->isprefix || (*(chopprefix = drrb->drr_toname +
strlen(sendfs)) != '\0' && *chopprefix != '@')) &&
!zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
char snap[ZFS_MAX_DATASET_NAME_LEN];
(void) strcpy(snap, strchr(destsnap, '@'));
if (guid_to_name(hdl, name, drrb->drr_fromguid,
B_FALSE, destsnap) == 0) {
*strchr(destsnap, '@') = '\0';
(void) strcat(destsnap, snap);
}
}
}
(void) strcpy(name, destsnap);
*strchr(name, '@') = '\0';
redacted = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo) &
DMU_BACKUP_FEATURE_REDACTED;
if (zfs_dataset_exists(hdl, name, ZFS_TYPE_DATASET)) {
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp;
boolean_t encrypted;
(void) strcpy(zc.zc_name, name);
/*
* Destination fs exists. It must be one of these cases:
* - an incremental send stream
* - the stream specifies a new fs (full stream or clone)
* and they want us to blow away the existing fs (and
* have therefore specified -F and removed any snapshots)
* - we are resuming a failed receive.
*/
if (stream_wantsnewfs) {
boolean_t is_volume = drrb->drr_type == DMU_OST_ZVOL;
if (!flags->force) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' exists\n"
"must specify -F to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (zfs_ioctl(hdl, ZFS_IOC_SNAPSHOT_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has snapshots (eg. %s)\n"
"must destroy them to overwrite it"),
zc.zc_name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume && strrchr(name, '/') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s is the root dataset\n"
"cannot overwrite with a ZVOL"),
name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
if (is_volume &&
zfs_ioctl(hdl, ZFS_IOC_DATASET_LIST_NEXT,
&zc) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination has children (eg. %s)\n"
"cannot overwrite with a ZVOL"),
zc.zc_name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
goto out;
}
}
if ((zhp = zfs_open(hdl, name,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) == NULL) {
err = -1;
goto out;
}
if (stream_wantsnewfs &&
zhp->zfs_dmustats.dds_origin[0]) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' is a clone\n"
"must destroy it to overwrite it"), name);
err = zfs_error(hdl, EZFS_EXISTS, errbuf);
goto out;
}
/*
* Raw sends cannot be performed as an incremental on top
* of existing unencrypted datasets. zfs recv -F can't be
* used to blow away an existing encrypted filesystem. This
* is because it would require the dsl dir to point to the
* new key (or lack of a key) and the old key at the same
* time. The -F flag may still be used for deleting
* intermediate snapshots that would otherwise prevent the
* receive from working.
*/
encrypted = zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) !=
ZIO_CRYPT_OFF;
if (!stream_wantsnewfs && !encrypted && raw) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot perform raw receive on top of "
"existing unencrypted dataset"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (stream_wantsnewfs && flags->force &&
((raw && !encrypted) || encrypted)) {
zfs_close(zhp);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"zfs receive -F cannot be used to destroy an "
"encrypted filesystem or overwrite an "
"unencrypted one with an encrypted one"));
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (!flags->dryrun && zhp->zfs_type == ZFS_TYPE_FILESYSTEM &&
(stream_wantsnewfs || stream_resumingnewfs)) {
/* We can't do online recv in this case */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
flags->forceunmount ? MS_FORCE : 0);
if (clp == NULL) {
zfs_close(zhp);
err = -1;
goto out;
}
if (changelist_prefix(clp) != 0) {
changelist_free(clp);
zfs_close(zhp);
err = -1;
goto out;
}
}
/*
* If we are resuming a newfs, set newfs here so that we will
* mount it if the recv succeeds this time. We can tell
* that it was a newfs on the first recv because the fs
* itself will be inconsistent (if the fs existed when we
* did the first recv, we would have received it into
* .../%recv).
*/
if (resuming && zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT))
newfs = B_TRUE;
/* we want to know if we're zoned when validating -o|-x props */
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
/* may need this info later; get it now while we have zhp around */
if (zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN, NULL, 0,
NULL, NULL, 0, B_TRUE) == 0)
hastoken = B_TRUE;
/* gather existing properties on destination */
origprops = fnvlist_alloc();
fnvlist_merge(origprops, zhp->zfs_props);
fnvlist_merge(origprops, zhp->zfs_user_props);
zfs_close(zhp);
} else {
zfs_handle_t *zhp;
/*
* Destination filesystem does not exist. Therefore we better
* be creating a new filesystem (either from a full backup, or
* a clone). It would therefore be invalid if the user
* specified only the pool name (i.e. if the destination name
* contained no slash character).
*/
cp = strrchr(name, '/');
if (!stream_wantsnewfs || cp == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination '%s' does not exist"), name);
err = zfs_error(hdl, EZFS_NOENT, errbuf);
goto out;
}
/*
* Trim off the final dataset component so we perform the
* recvbackup ioctl on the filesystem's parent.
*/
*cp = '\0';
if (flags->isprefix && !flags->istail && !flags->dryrun &&
create_parents(hdl, destsnap, strlen(tosnap)) != 0) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
/* validate parent */
zhp = zfs_open(hdl, name, ZFS_TYPE_DATASET);
if (zhp == NULL) {
err = zfs_error(hdl, EZFS_BADRESTORE, errbuf);
goto out;
}
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent '%s' is not a filesystem"), name);
err = zfs_error(hdl, EZFS_WRONG_PARENT, errbuf);
zfs_close(zhp);
goto out;
}
zfs_close(zhp);
newfs = B_TRUE;
*cp = '/';
}
if (flags->verbose) {
(void) printf("%s %s stream of %s into %s\n",
flags->dryrun ? "would receive" : "receiving",
drrb->drr_fromguid ? "incremental" : "full",
drrb->drr_toname, destsnap);
(void) fflush(stdout);
}
/*
* If this is the top-level dataset, record it so we can use it
* for recursive operations later.
*/
if (top_zfs != NULL &&
(*top_zfs == NULL || strcmp(*top_zfs, name) == 0)) {
toplevel = B_TRUE;
if (*top_zfs == NULL)
*top_zfs = zfs_strdup(hdl, name);
}
if (drrb->drr_type == DMU_OST_ZVOL) {
type = ZFS_TYPE_VOLUME;
} else if (drrb->drr_type == DMU_OST_ZFS) {
type = ZFS_TYPE_FILESYSTEM;
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid record type: 0x%d"), drrb->drr_type);
err = zfs_error(hdl, EZFS_BADSTREAM, errbuf);
goto out;
}
if ((err = zfs_setup_cmdline_props(hdl, type, name, zoned, recursive,
stream_wantsnewfs, raw, toplevel, rcvprops, cmdprops, origprops,
&oxprops, &wkeydata, &wkeylen, errbuf)) != 0)
goto out;
/*
* When sending with properties (zfs send -p), the encryption property
* is not included because it is a SETONCE property and therefore
* treated as read-only. However, we are always able to determine its
* value because raw sends will include it in the DRR_BEGIN payload
* and non-raw sends with properties are not allowed for encrypted
* datasets. Therefore, if this is a non-raw properties stream, we can
* infer that the value should be ZIO_CRYPT_OFF and manually add that
* to the received properties.
*/
if (stream_wantsnewfs && !raw && rcvprops != NULL &&
!nvlist_exists(cmdprops, zfs_prop_to_name(ZFS_PROP_ENCRYPTION))) {
if (oxprops == NULL)
oxprops = fnvlist_alloc();
fnvlist_add_uint64(oxprops,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), ZIO_CRYPT_OFF);
}
if (flags->dryrun) {
void *buf = zfs_alloc(hdl, SPA_MAXBLOCKSIZE);
/*
* We have read the DRR_BEGIN record, but we have
* not yet read the payload. For non-dryrun sends
* this will be done by the kernel, so we must
* emulate that here, before attempting to read
* more records.
*/
err = recv_read(hdl, infd, buf, drr->drr_payloadlen,
flags->byteswap, NULL);
free(buf);
if (err != 0)
goto out;
err = recv_skip(hdl, infd, flags->byteswap);
goto out;
}
err = ioctl_err = lzc_receive_with_cmdprops(destsnap, rcvprops,
oxprops, wkeydata, wkeylen, origin, flags->force, flags->resumable,
raw, infd, drr_noswap, -1, &read_bytes, &errflags,
NULL, &prop_errors);
ioctl_errno = ioctl_err;
prop_errflags = errflags;
if (err == 0) {
nvpair_t *prop_err = NULL;
while ((prop_err = nvlist_next_nvpair(prop_errors,
prop_err)) != NULL) {
char tbuf[1024];
zfs_prop_t prop;
int intval;
prop = zfs_name_to_prop(nvpair_name(prop_err));
(void) nvpair_value_int32(prop_err, &intval);
if (strcmp(nvpair_name(prop_err),
ZPROP_N_MORE_ERRORS) == 0) {
trunc_prop_errs(intval);
break;
} else if (snapname == NULL || finalsnap == NULL ||
strcmp(finalsnap, snapname) == 0 ||
strcmp(nvpair_name(prop_err),
zfs_prop_to_name(ZFS_PROP_REFQUOTA)) != 0) {
/*
* Skip the special case of errors on, for
* example, "refquota" for intermediate
* snapshots leading up to the final one;
* that is what the checks above are for.
*
* See zfs_ioctl.c's extract_delay_props() for
* a list of props which can fail on
* intermediate snapshots, but shouldn't
* affect the overall receive.
*/
(void) snprintf(tbuf, sizeof (tbuf),
dgettext(TEXT_DOMAIN,
"cannot receive %s property on %s"),
nvpair_name(prop_err), name);
zfs_setprop_error(hdl, prop, intval, tbuf);
}
}
}
if (err == 0 && snapprops_nvlist) {
zfs_cmd_t zc = {"\0"};
(void) strcpy(zc.zc_name, destsnap);
zc.zc_cookie = B_TRUE; /* received */
if (zcmd_write_src_nvlist(hdl, &zc, snapprops_nvlist) == 0) {
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
zcmd_free_nvlists(&zc);
}
}
if (err == 0 && snapholds_nvlist) {
nvpair_t *pair;
nvlist_t *holds, *errors = NULL;
int cleanup_fd = -1;
VERIFY(0 == nvlist_alloc(&holds, 0, KM_SLEEP));
for (pair = nvlist_next_nvpair(snapholds_nvlist, NULL);
pair != NULL;
pair = nvlist_next_nvpair(snapholds_nvlist, pair)) {
fnvlist_add_string(holds, destsnap, nvpair_name(pair));
}
(void) lzc_hold(holds, cleanup_fd, &errors);
fnvlist_free(snapholds_nvlist);
fnvlist_free(holds);
}
if (err && (ioctl_errno == ENOENT || ioctl_errno == EEXIST)) {
/*
* It may be that this snapshot already exists,
* in which case we want to consume & ignore it
* rather than failing.
*/
avl_tree_t *local_avl;
nvlist_t *local_nv, *fs;
cp = strchr(destsnap, '@');
/*
* XXX Do this faster by just iterating over snaps in
* this fs. Also if zc_value does not exist, we will
* get a strange "does not exist" error message.
*/
*cp = '\0';
if (gather_nvlist(hdl, destsnap, NULL, NULL, B_FALSE, B_TRUE,
B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE, B_FALSE,
B_TRUE, &local_nv, &local_avl) == 0) {
*cp = '@';
fs = fsavl_find(local_avl, drrb->drr_toguid, NULL);
fsavl_destroy(local_avl);
fnvlist_free(local_nv);
if (fs != NULL) {
if (flags->verbose) {
(void) printf("snap %s already exists; "
"ignoring\n", destsnap);
}
err = ioctl_err = recv_skip(hdl, infd,
flags->byteswap);
}
}
*cp = '@';
}
if (ioctl_err != 0) {
switch (ioctl_errno) {
case ENODEV:
cp = strchr(destsnap, '@');
*cp = '\0';
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"most recent snapshot of %s does not\n"
"match incremental source"), destsnap);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
*cp = '@';
break;
case ETXTBSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s has been modified\n"
"since most recent snapshot"), name);
(void) zfs_error(hdl, EZFS_BADRESTORE, errbuf);
break;
case EACCES:
if (raw && stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to create encryption key"));
} else if (raw && !stream_wantsnewfs) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption key does not match "
"existing key"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"inherited key must be loaded"));
}
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
break;
case EEXIST:
cp = strchr(destsnap, '@');
if (newfs) {
/* it's the containing fs that exists */
*cp = '\0';
}
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination already exists"));
(void) zfs_error_fmt(hdl, EZFS_EXISTS,
dgettext(TEXT_DOMAIN, "cannot restore to %s"),
destsnap);
*cp = '@';
break;
case EINVAL:
if (flags->resumable) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"kernel modules must be upgraded to "
"receive this stream."));
} else if (embedded && !raw) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incompatible embedded data stream "
"feature with encrypted receive."));
}
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ECKSUM:
case ZFS_ERR_STREAM_TRUNCATED:
recv_ecksum_set_aux(hdl, destsnap, flags->resumable,
ioctl_err == ECKSUM);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_STREAM_LARGE_BLOCK_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"incremental send stream requires -L "
"(--large-block), to match previous receive."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to receive this stream."));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EDQUOT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s space quota exceeded."), name);
(void) zfs_error(hdl, EZFS_NOSPC, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid missing. See errata %u at "
"https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-ER."),
ZPOOL_ERRATA_ZOL_8308_ENCRYPTION);
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_FROM_IVSET_GUID_MISMATCH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"IV set guid mismatch. See the 'zfs receive' "
"man page section\n discussing the limitations "
"of raw encrypted send streams."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case ZFS_ERR_SPILL_BLOCK_FLAG_MISSING:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Spill block flag missing for raw send.\n"
"The zfs software on the sending system must "
"be updated."));
(void) zfs_error(hdl, EZFS_BADSTREAM, errbuf);
break;
case EBUSY:
if (hastoken) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"destination %s contains "
"partially-complete state from "
"\"zfs receive -s\"."), name);
(void) zfs_error(hdl, EZFS_BUSY, errbuf);
break;
}
- /* fallthru */
+ fallthrough;
default:
(void) zfs_standard_error(hdl, ioctl_errno, errbuf);
}
}
/*
* Mount the target filesystem (if created). Also mount any
* children of the target filesystem if we did a replication
* receive (indicated by stream_avl being non-NULL).
*/
if (clp) {
if (!flags->nomount)
err |= changelist_postfix(clp);
changelist_free(clp);
}
if ((newfs || stream_avl) && type == ZFS_TYPE_FILESYSTEM && !redacted)
flags->domount = B_TRUE;
if (prop_errflags & ZPROP_ERR_NOCLEAR) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to clear unreceived properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (prop_errflags & ZPROP_ERR_NORESTORE) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Warning: "
"failed to restore original properties on %s"), name);
(void) fprintf(stderr, "\n");
}
if (err || ioctl_err) {
err = -1;
goto out;
}
if (flags->verbose) {
char buf1[64];
char buf2[64];
uint64_t bytes = read_bytes;
time_t delta = time(NULL) - begin_time;
if (delta == 0)
delta = 1;
zfs_nicebytes(bytes, buf1, sizeof (buf1));
zfs_nicebytes(bytes/delta, buf2, sizeof (buf2));
(void) printf("received %s stream in %lld seconds (%s/sec)\n",
buf1, (longlong_t)delta, buf2);
}
err = 0;
out:
if (prop_errors != NULL)
fnvlist_free(prop_errors);
if (tmp_keylocation[0] != '\0') {
fnvlist_add_string(rcvprops,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), tmp_keylocation);
}
if (newprops)
fnvlist_free(rcvprops);
fnvlist_free(oxprops);
fnvlist_free(origprops);
return (err);
}
/*
* Check properties we were asked to override (both -o|-x)
*/
static boolean_t
zfs_receive_checkprops(libzfs_handle_t *hdl, nvlist_t *props,
const char *errbuf)
{
nvpair_t *nvp;
zfs_prop_t prop;
const char *name;
nvp = NULL;
while ((nvp = nvlist_next_nvpair(props, nvp)) != NULL) {
name = nvpair_name(nvp);
prop = zfs_name_to_prop(name);
if (prop == ZPROP_INVAL) {
if (!zfs_prop_user(name)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), name);
return (B_FALSE);
}
continue;
}
/*
* "origin" is readonly but is used to receive datasets as
* clones so we don't raise an error here
*/
if (prop == ZFS_PROP_ORIGIN)
continue;
/* encryption params have their own verification later */
if (prop == ZFS_PROP_ENCRYPTION ||
zfs_prop_encryption_key_param(prop))
continue;
/*
* cannot override readonly, set-once, and certain other
* specifically handled settable properties
*/
if (zfs_prop_readonly(prop) || prop == ZFS_PROP_VERSION ||
prop == ZFS_PROP_VOLSIZE) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), name);
return (B_FALSE);
}
}
return (B_TRUE);
}
static int
zfs_receive_impl(libzfs_handle_t *hdl, const char *tosnap,
const char *originsnap, recvflags_t *flags, int infd, const char *sendfs,
nvlist_t *stream_nv, avl_tree_t *stream_avl, char **top_zfs,
const char *finalsnap, nvlist_t *cmdprops)
{
int err;
dmu_replay_record_t drr, drr_noswap;
struct drr_begin *drrb = &drr.drr_u.drr_begin;
char errbuf[1024];
zio_cksum_t zcksum = { { 0 } };
uint64_t featureflags;
int hdrtype;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot receive"));
/* check cmdline props, raise an error if they cannot be received */
if (!zfs_receive_checkprops(hdl, cmdprops, errbuf)) {
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if (flags->isprefix &&
!zfs_dataset_exists(hdl, tosnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified fs "
"(%s) does not exist"), tosnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
if (originsnap &&
!zfs_dataset_exists(hdl, originsnap, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "specified origin fs "
"(%s) does not exist"), originsnap);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* read in the BEGIN record */
if (0 != (err = recv_read(hdl, infd, &drr, sizeof (drr), B_FALSE,
&zcksum)))
return (err);
if (drr.drr_type == DRR_END || drr.drr_type == BSWAP_32(DRR_END)) {
/* It's the double end record at the end of a package */
return (ENODATA);
}
/* the kernel needs the non-byteswapped begin record */
drr_noswap = drr;
flags->byteswap = B_FALSE;
if (drrb->drr_magic == BSWAP_64(DMU_BACKUP_MAGIC)) {
/*
* We computed the checksum in the wrong byteorder in
* recv_read() above; do it again correctly.
*/
bzero(&zcksum, sizeof (zio_cksum_t));
fletcher_4_incremental_byteswap(&drr, sizeof (drr), &zcksum);
flags->byteswap = B_TRUE;
drr.drr_type = BSWAP_32(drr.drr_type);
drr.drr_payloadlen = BSWAP_32(drr.drr_payloadlen);
drrb->drr_magic = BSWAP_64(drrb->drr_magic);
drrb->drr_versioninfo = BSWAP_64(drrb->drr_versioninfo);
drrb->drr_creation_time = BSWAP_64(drrb->drr_creation_time);
drrb->drr_type = BSWAP_32(drrb->drr_type);
drrb->drr_flags = BSWAP_32(drrb->drr_flags);
drrb->drr_toguid = BSWAP_64(drrb->drr_toguid);
drrb->drr_fromguid = BSWAP_64(drrb->drr_fromguid);
}
if (drrb->drr_magic != DMU_BACKUP_MAGIC || drr.drr_type != DRR_BEGIN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad magic number)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
featureflags = DMU_GET_FEATUREFLAGS(drrb->drr_versioninfo);
hdrtype = DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo);
if (!DMU_STREAM_SUPPORTED(featureflags) ||
(hdrtype != DMU_SUBSTREAM && hdrtype != DMU_COMPOUNDSTREAM)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"stream has unsupported feature, feature flags = %llx"),
(unsigned long long)featureflags);
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
/* Holds feature is set once in the compound stream header. */
if (featureflags & DMU_BACKUP_FEATURE_HOLDS)
flags->holds = B_TRUE;
if (strchr(drrb->drr_toname, '@') == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "invalid "
"stream (bad snapshot name)"));
return (zfs_error(hdl, EZFS_BADSTREAM, errbuf));
}
if (DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) == DMU_SUBSTREAM) {
char nonpackage_sendfs[ZFS_MAX_DATASET_NAME_LEN];
if (sendfs == NULL) {
/*
* We were not called from zfs_receive_package(). Get
* the fs specified by 'zfs send'.
*/
char *cp;
(void) strlcpy(nonpackage_sendfs,
drr.drr_u.drr_begin.drr_toname,
sizeof (nonpackage_sendfs));
if ((cp = strchr(nonpackage_sendfs, '@')) != NULL)
*cp = '\0';
sendfs = nonpackage_sendfs;
VERIFY(finalsnap == NULL);
}
return (zfs_receive_one(hdl, infd, tosnap, originsnap, flags,
&drr, &drr_noswap, sendfs, stream_nv, stream_avl, top_zfs,
finalsnap, cmdprops));
} else {
assert(DMU_GET_STREAM_HDRTYPE(drrb->drr_versioninfo) ==
DMU_COMPOUNDSTREAM);
return (zfs_receive_package(hdl, infd, tosnap, flags, &drr,
&zcksum, top_zfs, cmdprops));
}
}
/*
* Restores a backup of tosnap from the file descriptor specified by infd.
* Returns 0 on total success, -2 if some things couldn't be
* destroyed/renamed/promoted, and -1 if some things couldn't be received.
* (-1 overrides -2; if -1 is returned and the resumable flag was specified,
* the transfer can be resumed if the sending side supports it.)
*/
int
zfs_receive(libzfs_handle_t *hdl, const char *tosnap, nvlist_t *props,
recvflags_t *flags, int infd, avl_tree_t *stream_avl)
{
char *top_zfs = NULL;
int err;
struct stat sb;
char *originsnap = NULL;
/*
* The only way fstat can fail is if we do not have a valid file
* descriptor.
*/
if (fstat(infd, &sb) == -1) {
perror("fstat");
return (-2);
}
/*
* It is not uncommon for gigabytes to be processed in zfs receive.
* Speculatively increase the buffer size if supported by the platform.
*/
if (S_ISFIFO(sb.st_mode))
libzfs_set_pipe_max(infd);
if (props) {
err = nvlist_lookup_string(props, "origin", &originsnap);
if (err && err != ENOENT)
return (err);
}
err = zfs_receive_impl(hdl, tosnap, originsnap, flags, infd, NULL, NULL,
stream_avl, &top_zfs, NULL, props);
if (err == 0 && !flags->nomount && flags->domount && top_zfs) {
zfs_handle_t *zhp = NULL;
prop_changelist_t *clp = NULL;
zhp = zfs_open(hdl, top_zfs,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
err = -1;
goto out;
} else {
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
goto out;
}
clp = changelist_gather(zhp, ZFS_PROP_MOUNTPOINT,
CL_GATHER_MOUNT_ALWAYS,
flags->forceunmount ? MS_FORCE : 0);
zfs_close(zhp);
if (clp == NULL) {
err = -1;
goto out;
}
/* mount and share received datasets */
err = changelist_postfix(clp);
changelist_free(clp);
if (err != 0)
err = -1;
}
}
out:
if (top_zfs)
free(top_zfs);
return (err);
}
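To show how the entry point above is meant to be driven, here is a minimal consumer sketch that feeds a send stream from stdin into zfs_receive(), using only the signature shown in this file. The target dataset name is hypothetical, and real callers would set more of the recvflags_t fields and do fuller error reporting.

#include <stdio.h>
#include <unistd.h>
#include <libzfs.h>

int
main(void)
{
	libzfs_handle_t *hdl = libzfs_init();
	recvflags_t flags = { 0 };
	int err;

	if (hdl == NULL)
		return (1);

	flags.verbose = B_TRUE;		/* print per-stream progress */

	/* receive the stream on stdin into a hypothetical destination */
	err = zfs_receive(hdl, "tank/backup", NULL, &flags,
	    STDIN_FILENO, NULL);
	if (err != 0)
		(void) fprintf(stderr, "zfs_receive failed: %d\n", err);

	libzfs_fini(hdl);
	return (err == 0 ? 0 : 1);
}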
diff --git a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
index 88d6561a5fb4..c3c009ae3a10 100644
--- a/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
+++ b/sys/contrib/openzfs/lib/libzfs/libzfs_util.c
@@ -1,2083 +1,2083 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2020 Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2020 The FreeBSD Foundation
*
* Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* Internal utility routines for the ZFS library.
*/
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <math.h>
#if LIBFETCH_DYNAMIC
#include <dlfcn.h>
#endif
#include <sys/stat.h>
#include <sys/mnttab.h>
#include <sys/mntent.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include "libzfs_impl.h"
#include "zfs_prop.h"
#include "zfeature_common.h"
#include <zfs_fletcher.h>
#include <libzutil.h>
/*
* We only care about the scheme in order to match the scheme
* with the handler. Each handler should validate the full URI
* as necessary.
*/
#define URI_REGEX "^\\([A-Za-z][A-Za-z0-9+.\\-]*\\):"
int
libzfs_errno(libzfs_handle_t *hdl)
{
return (hdl->libzfs_error);
}
const char *
libzfs_error_action(libzfs_handle_t *hdl)
{
return (hdl->libzfs_action);
}
const char *
libzfs_error_description(libzfs_handle_t *hdl)
{
if (hdl->libzfs_desc[0] != '\0')
return (hdl->libzfs_desc);
switch (hdl->libzfs_error) {
case EZFS_NOMEM:
return (dgettext(TEXT_DOMAIN, "out of memory"));
case EZFS_BADPROP:
return (dgettext(TEXT_DOMAIN, "invalid property value"));
case EZFS_PROPREADONLY:
return (dgettext(TEXT_DOMAIN, "read-only property"));
case EZFS_PROPTYPE:
return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
"datasets of this type"));
case EZFS_PROPNONINHERIT:
return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
case EZFS_PROPSPACE:
return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
case EZFS_BADTYPE:
return (dgettext(TEXT_DOMAIN, "operation not applicable to "
"datasets of this type"));
case EZFS_BUSY:
return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
case EZFS_EXISTS:
return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
case EZFS_NOENT:
return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
case EZFS_BADSTREAM:
return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
case EZFS_DSREADONLY:
return (dgettext(TEXT_DOMAIN, "dataset is read-only"));
case EZFS_VOLTOOBIG:
return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
"this system"));
case EZFS_INVALIDNAME:
return (dgettext(TEXT_DOMAIN, "invalid name"));
case EZFS_BADRESTORE:
return (dgettext(TEXT_DOMAIN, "unable to restore to "
"destination"));
case EZFS_BADBACKUP:
return (dgettext(TEXT_DOMAIN, "backup failed"));
case EZFS_BADTARGET:
return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
case EZFS_NODEVICE:
return (dgettext(TEXT_DOMAIN, "no such device in pool"));
case EZFS_BADDEV:
return (dgettext(TEXT_DOMAIN, "invalid device"));
case EZFS_NOREPLICAS:
return (dgettext(TEXT_DOMAIN, "no valid replicas"));
case EZFS_RESILVERING:
return (dgettext(TEXT_DOMAIN, "currently resilvering"));
case EZFS_BADVERSION:
return (dgettext(TEXT_DOMAIN, "unsupported version or "
"feature"));
case EZFS_POOLUNAVAIL:
return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
case EZFS_DEVOVERFLOW:
return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
case EZFS_BADPATH:
return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
case EZFS_CROSSTARGET:
return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
"pools"));
case EZFS_ZONED:
return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
case EZFS_MOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "mount failed"));
case EZFS_UMOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "unmount failed"));
case EZFS_UNSHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share removal failed"));
case EZFS_SHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "NFS share creation failed"));
case EZFS_UNSHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share removal failed"));
case EZFS_SHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "SMB share creation failed"));
case EZFS_PERM:
return (dgettext(TEXT_DOMAIN, "permission denied"));
case EZFS_NOSPC:
return (dgettext(TEXT_DOMAIN, "out of space"));
case EZFS_FAULT:
return (dgettext(TEXT_DOMAIN, "bad address"));
case EZFS_IO:
return (dgettext(TEXT_DOMAIN, "I/O error"));
case EZFS_INTR:
return (dgettext(TEXT_DOMAIN, "signal received"));
case EZFS_ISSPARE:
return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
"spare"));
case EZFS_INVALCONFIG:
return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
case EZFS_RECURSIVE:
return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
case EZFS_NOHISTORY:
return (dgettext(TEXT_DOMAIN, "no history available"));
case EZFS_POOLPROPS:
return (dgettext(TEXT_DOMAIN, "failed to retrieve "
"pool properties"));
case EZFS_POOL_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this type of pool"));
case EZFS_POOL_INVALARG:
return (dgettext(TEXT_DOMAIN, "invalid argument for "
"this pool operation"));
case EZFS_NAMETOOLONG:
return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
case EZFS_OPENFAILED:
return (dgettext(TEXT_DOMAIN, "open failed"));
case EZFS_NOCAP:
return (dgettext(TEXT_DOMAIN,
"disk capacity information could not be retrieved"));
case EZFS_LABELFAILED:
return (dgettext(TEXT_DOMAIN, "write of label failed"));
case EZFS_BADWHO:
return (dgettext(TEXT_DOMAIN, "invalid user/group"));
case EZFS_BADPERM:
return (dgettext(TEXT_DOMAIN, "invalid permission"));
case EZFS_BADPERMSET:
return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
case EZFS_NODELEGATION:
return (dgettext(TEXT_DOMAIN, "delegated administration is "
"disabled on pool"));
case EZFS_BADCACHE:
return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
case EZFS_ISL2CACHE:
return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
case EZFS_VDEVNOTSUP:
return (dgettext(TEXT_DOMAIN, "vdev specification is not "
"supported"));
case EZFS_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this dataset"));
case EZFS_IOC_NOTSUPPORTED:
return (dgettext(TEXT_DOMAIN, "operation not supported by "
"zfs kernel module"));
case EZFS_ACTIVE_SPARE:
return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
"device"));
case EZFS_UNPLAYED_LOGS:
return (dgettext(TEXT_DOMAIN, "log device has unplayed intent "
"logs"));
case EZFS_REFTAG_RELE:
return (dgettext(TEXT_DOMAIN, "no such tag on this dataset"));
case EZFS_REFTAG_HOLD:
return (dgettext(TEXT_DOMAIN, "tag already exists on this "
"dataset"));
case EZFS_TAGTOOLONG:
return (dgettext(TEXT_DOMAIN, "tag too long"));
case EZFS_PIPEFAILED:
return (dgettext(TEXT_DOMAIN, "pipe create failed"));
case EZFS_THREADCREATEFAILED:
return (dgettext(TEXT_DOMAIN, "thread create failed"));
case EZFS_POSTSPLIT_ONLINE:
return (dgettext(TEXT_DOMAIN, "disk was split from this pool "
"into a new one"));
case EZFS_SCRUB_PAUSED:
return (dgettext(TEXT_DOMAIN, "scrub is paused; "
"use 'zpool scrub' to resume"));
case EZFS_SCRUBBING:
return (dgettext(TEXT_DOMAIN, "currently scrubbing; "
"use 'zpool scrub -s' to cancel current scrub"));
case EZFS_NO_SCRUB:
return (dgettext(TEXT_DOMAIN, "there is no active scrub"));
case EZFS_DIFF:
return (dgettext(TEXT_DOMAIN, "unable to generate diffs"));
case EZFS_DIFFDATA:
return (dgettext(TEXT_DOMAIN, "invalid diff data"));
case EZFS_POOLREADONLY:
return (dgettext(TEXT_DOMAIN, "pool is read-only"));
case EZFS_NO_PENDING:
return (dgettext(TEXT_DOMAIN, "operation is not "
"in progress"));
case EZFS_CHECKPOINT_EXISTS:
return (dgettext(TEXT_DOMAIN, "checkpoint exists"));
case EZFS_DISCARDING_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "currently discarding "
"checkpoint"));
case EZFS_NO_CHECKPOINT:
return (dgettext(TEXT_DOMAIN, "checkpoint does not exist"));
case EZFS_DEVRM_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "device removal in progress"));
case EZFS_VDEV_TOO_BIG:
return (dgettext(TEXT_DOMAIN, "device exceeds supported size"));
case EZFS_ACTIVE_POOL:
return (dgettext(TEXT_DOMAIN, "pool is imported on a "
"different host"));
case EZFS_CRYPTOFAILED:
return (dgettext(TEXT_DOMAIN, "encryption failure"));
case EZFS_TOOMANY:
return (dgettext(TEXT_DOMAIN, "argument list too long"));
case EZFS_INITIALIZING:
return (dgettext(TEXT_DOMAIN, "currently initializing"));
case EZFS_NO_INITIALIZE:
return (dgettext(TEXT_DOMAIN, "there is no active "
"initialization"));
case EZFS_WRONG_PARENT:
return (dgettext(TEXT_DOMAIN, "invalid parent dataset"));
case EZFS_TRIMMING:
return (dgettext(TEXT_DOMAIN, "currently trimming"));
case EZFS_NO_TRIM:
return (dgettext(TEXT_DOMAIN, "there is no active trim"));
case EZFS_TRIM_NOTSUP:
return (dgettext(TEXT_DOMAIN, "trim operations are not "
"supported by this device"));
case EZFS_NO_RESILVER_DEFER:
return (dgettext(TEXT_DOMAIN, "this action requires the "
"resilver_defer feature"));
case EZFS_EXPORT_IN_PROGRESS:
return (dgettext(TEXT_DOMAIN, "pool export in progress"));
case EZFS_REBUILDING:
return (dgettext(TEXT_DOMAIN, "currently sequentially "
"resilvering"));
case EZFS_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
assert(hdl->libzfs_error == 0);
return (dgettext(TEXT_DOMAIN, "no error"));
}
}
void
zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
fmt, ap);
hdl->libzfs_desc_active = 1;
va_end(ap);
}
static void
zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
{
(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
fmt, ap);
hdl->libzfs_error = error;
if (hdl->libzfs_desc_active)
hdl->libzfs_desc_active = 0;
else
hdl->libzfs_desc[0] = '\0';
if (hdl->libzfs_printerr) {
if (error == EZFS_UNKNOWN) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
"error: %s: %s\n"), hdl->libzfs_action,
libzfs_error_description(hdl));
abort();
}
(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
libzfs_error_description(hdl));
if (error == EZFS_NOMEM)
exit(1);
}
}
int
zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_error_fmt(hdl, error, "%s", msg));
}
int
zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zfs_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
va_list ap)
{
switch (error) {
case EPERM:
case EACCES:
zfs_verror(hdl, EZFS_PERM, fmt, ap);
return (-1);
case ECANCELED:
zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
return (-1);
case EIO:
zfs_verror(hdl, EZFS_IO, fmt, ap);
return (-1);
case EFAULT:
zfs_verror(hdl, EZFS_FAULT, fmt, ap);
return (-1);
case EINTR:
zfs_verror(hdl, EZFS_INTR, fmt, ap);
return (-1);
}
return (0);
}
int
zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_standard_error_fmt(hdl, error, "%s", msg));
}
int
zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENXIO:
case ENODEV:
case EPIPE:
zfs_verror(hdl, EZFS_IO, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset does not exist"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
break;
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_UNKNOWN_SEND_STREAM_FEATURE:
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_WRONG_PARENT:
zfs_verror(hdl, EZFS_WRONG_PARENT, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
break;
}
va_end(ap);
return (-1);
}
void
zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
char *errbuf)
{
switch (err) {
case ENOSPC:
/*
* For quotas and reservations, ENOSPC indicates
* something different; setting a quota or reservation
* doesn't use any disk space.
*/
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is less than current used or "
"reserved space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is greater than available space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, err, errbuf);
break;
}
break;
case EBUSY:
(void) zfs_standard_error(hdl, EBUSY, errbuf);
break;
case EROFS:
(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value too long"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool and or dataset must be upgraded to set this "
"property or value"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ERANGE:
if (prop == ZFS_PROP_COMPRESSION ||
prop == ZFS_PROP_DNODESIZE ||
prop == ZFS_PROP_RECORDSIZE) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"bootable datasets"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else if (prop == ZFS_PROP_CHECKSUM ||
prop == ZFS_PROP_DEDUP) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"root pools"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EINVAL:
if (prop == ZPROP_INVAL) {
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case ZFS_ERR_BADPROP:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case EACCES:
if (prop == ZFS_PROP_KEYLOCATION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation may only be set on encryption roots"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
#ifdef _ILP32
if (prop == ZFS_PROP_VOLSIZE) {
(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
break;
}
#endif
- /* FALLTHROUGH */
+ fallthrough;
default:
(void) zfs_standard_error(hdl, err, errbuf);
}
}
int
zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zpool_standard_error_fmt(hdl, error, "%s", msg));
}
int
zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENODEV:
zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "no such pool or dataset"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
/* There is no pending operation to cancel */
case ENOTACTIVE:
zfs_verror(hdl, EZFS_NO_PENDING, fmt, ap);
break;
case ENXIO:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is currently unavailable"));
zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
break;
case EINVAL:
zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
return (-1);
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case EDOM:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"block size out of range or does not match"));
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
case ZFS_ERR_CHECKPOINT_EXISTS:
zfs_verror(hdl, EZFS_CHECKPOINT_EXISTS, fmt, ap);
break;
case ZFS_ERR_DISCARDING_CHECKPOINT:
zfs_verror(hdl, EZFS_DISCARDING_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_NO_CHECKPOINT:
zfs_verror(hdl, EZFS_NO_CHECKPOINT, fmt, ap);
break;
case ZFS_ERR_DEVRM_IN_PROGRESS:
zfs_verror(hdl, EZFS_DEVRM_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_VDEV_TOO_BIG:
zfs_verror(hdl, EZFS_VDEV_TOO_BIG, fmt, ap);
break;
case ZFS_ERR_EXPORT_IN_PROGRESS:
zfs_verror(hdl, EZFS_EXPORT_IN_PROGRESS, fmt, ap);
break;
case ZFS_ERR_RESILVER_IN_PROGRESS:
zfs_verror(hdl, EZFS_RESILVERING, fmt, ap);
break;
case ZFS_ERR_REBUILD_IN_PROGRESS:
zfs_verror(hdl, EZFS_REBUILDING, fmt, ap);
break;
case ZFS_ERR_BADPROP:
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case ZFS_ERR_IOC_CMD_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support this operation. A reboot may "
"be required to enable this operation."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_UNAVAIL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "the loaded zfs "
"module does not support an option for this operation. "
"A reboot may be required to enable this option."));
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
case ZFS_ERR_IOC_ARG_REQUIRED:
case ZFS_ERR_IOC_ARG_BADTYPE:
zfs_verror(hdl, EZFS_IOC_NOTSUPPORTED, fmt, ap);
break;
default:
zfs_error_aux(hdl, "%s", strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
}
va_end(ap);
return (-1);
}
/*
* Display an out of memory error message and abort the current program.
*/
int
no_memory(libzfs_handle_t *hdl)
{
return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
}
/*
* A safe form of malloc() which will die if the allocation fails.
*/
void *
zfs_alloc(libzfs_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) no_memory(hdl);
return (data);
}
/*
* A safe form of asprintf() which will die if the allocation fails.
*/
char *
zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
char *ret;
int err;
va_start(ap, fmt);
err = vasprintf(&ret, fmt, ap);
va_end(ap);
if (err < 0) {
(void) no_memory(hdl);
ret = NULL;
}
return (ret);
}
/*
* A safe form of realloc(), which also zeroes newly allocated space.
*/
void *
zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
{
void *ret;
if ((ret = realloc(ptr, newsize)) == NULL) {
(void) no_memory(hdl);
return (NULL);
}
bzero((char *)ret + oldsize, (newsize - oldsize));
return (ret);
}
/*
* A safe form of strdup() which will die if the allocation fails.
*/
char *
zfs_strdup(libzfs_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) no_memory(hdl);
return (ret);
}
void
libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
{
hdl->libzfs_printerr = printerr;
}
/*
* Read lines from an open file descriptor and store them in an array of
* strings until EOF. lines[] will be allocated and populated with all the
* lines read. All newlines are replaced with NULL terminators for
* convenience. lines[] must be freed after use with libzfs_free_str_array().
*
* Returns the number of lines read.
*/
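/*
 * For example, if the child process wrote "foo\nbar\n" to the pipe, lines[]
 * would end up holding {"foo", "bar"} and the function would return 2.
 */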
static int
libzfs_read_stdout_from_fd(int fd, char **lines[])
{
FILE *fp;
int lines_cnt = 0;
size_t len = 0;
char *line = NULL;
char **tmp_lines = NULL, **tmp;
fp = fdopen(fd, "r");
if (fp == NULL) {
close(fd);
return (0);
}
while (getline(&line, &len, fp) != -1) {
tmp = realloc(tmp_lines, sizeof (*tmp_lines) * (lines_cnt + 1));
if (tmp == NULL) {
/* Return the lines we were able to process */
break;
}
tmp_lines = tmp;
/* Remove newline if not EOF */
if (line[strlen(line) - 1] == '\n')
line[strlen(line) - 1] = '\0';
tmp_lines[lines_cnt] = strdup(line);
if (tmp_lines[lines_cnt] == NULL)
break;
++lines_cnt;
}
free(line);
fclose(fp);
*lines = tmp_lines;
return (lines_cnt);
}
static int
libzfs_run_process_impl(const char *path, char *argv[], char *env[], int flags,
char **lines[], int *lines_cnt)
{
pid_t pid;
int error, devnull_fd;
int link[2];
/*
* Set up a pipe between our child and parent process if we're
* reading stdout.
*/
if (lines != NULL && pipe2(link, O_NONBLOCK | O_CLOEXEC) == -1)
return (-EPIPE);
pid = fork();
if (pid == 0) {
/* Child process */
devnull_fd = open("/dev/null", O_WRONLY | O_CLOEXEC);
if (devnull_fd < 0)
_exit(-1);
if (!(flags & STDOUT_VERBOSE) && (lines == NULL))
(void) dup2(devnull_fd, STDOUT_FILENO);
else if (lines != NULL) {
/* Save the output to lines[] */
dup2(link[1], STDOUT_FILENO);
}
if (!(flags & STDERR_VERBOSE))
(void) dup2(devnull_fd, STDERR_FILENO);
if (flags & NO_DEFAULT_PATH) {
if (env == NULL)
execv(path, argv);
else
execve(path, argv, env);
} else {
if (env == NULL)
execvp(path, argv);
else
execvpe(path, argv, env);
}
_exit(-1);
} else if (pid > 0) {
/* Parent process */
int status;
while ((error = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR)
;
if (error < 0 || !WIFEXITED(status))
return (-1);
if (lines != NULL) {
close(link[1]);
*lines_cnt = libzfs_read_stdout_from_fd(link[0], lines);
}
return (WEXITSTATUS(status));
}
return (-1);
}
int
libzfs_run_process(const char *path, char *argv[], int flags)
{
return (libzfs_run_process_impl(path, argv, NULL, flags, NULL, NULL));
}
/*
* Run a command and store its stdout lines in an array of strings (lines[]).
* lines[] is allocated and populated for you, and the number of lines is set in
* lines_cnt. lines[] must be freed after use with libzfs_free_str_array().
* All newlines (\n) in lines[] are replaced with NULL terminators for convenience.
*/
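/*
 * A minimal usage sketch (the command and its arguments are illustrative
 * only):
 *
 *	char *argv[] = { "echo", "hello", NULL };
 *	char **lines = NULL;
 *	int lines_cnt = 0;
 *
 *	if (libzfs_run_process_get_stdout("echo", argv, NULL,
 *	    &lines, &lines_cnt) == 0) {
 *		for (int i = 0; i < lines_cnt; i++)
 *			printf("%s\n", lines[i]);
 *		libzfs_free_str_array(lines, lines_cnt);
 *	}
 */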
int
libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[],
char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, 0, lines, lines_cnt));
}
/*
* Same as libzfs_run_process_get_stdout(), but run without $PATH set. This
* means that *path needs to be the full path to the executable.
*/
int
libzfs_run_process_get_stdout_nopath(const char *path, char *argv[],
char *env[], char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, NO_DEFAULT_PATH,
lines, lines_cnt));
}
/*
* Free an array of strings. Free both the strings contained in the array and
* the array itself.
*/
void
libzfs_free_str_array(char **strs, int count)
{
while (--count >= 0)
free(strs[count]);
free(strs);
}
/*
* Returns 1 if the environment variable is set to "YES", "yes", "ON", "on", or
* a non-zero number.
*
* Returns 0 otherwise.
*/
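/*
 * For example, ZFS_COLOR=1, ZFS_COLOR=yes and ZFS_COLOR=on all count as set,
 * while ZFS_COLOR=0 or an unset variable does not.
 */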
int
libzfs_envvar_is_set(char *envvar)
{
char *env = getenv(envvar);
if (env && (strtoul(env, NULL, 0) > 0 ||
(!strncasecmp(env, "YES", 3) && strnlen(env, 4) == 3) ||
(!strncasecmp(env, "ON", 2) && strnlen(env, 3) == 2)))
return (1);
return (0);
}
libzfs_handle_t *
libzfs_init(void)
{
libzfs_handle_t *hdl;
int error;
char *env;
if ((error = libzfs_load_module()) != 0) {
errno = error;
return (NULL);
}
if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) {
return (NULL);
}
if (regcomp(&hdl->libzfs_urire, URI_REGEX, 0) != 0) {
free(hdl);
return (NULL);
}
if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR|O_EXCL|O_CLOEXEC)) < 0) {
free(hdl);
return (NULL);
}
if (libzfs_core_init() != 0) {
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
libzfs_mnttab_init(hdl);
fletcher_4_init();
if (getenv("ZFS_PROP_DEBUG") != NULL) {
hdl->libzfs_prop_debug = B_TRUE;
}
if ((env = getenv("ZFS_SENDRECV_MAX_NVLIST")) != NULL) {
if ((error = zfs_nicestrtonum(hdl, env,
&hdl->libzfs_max_nvlist))) {
errno = error;
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
} else {
hdl->libzfs_max_nvlist = (SPA_MAXBLOCKSIZE * 4);
}
/*
* For testing, remove some settable properties and features
*/
if (libzfs_envvar_is_set("ZFS_SYSFS_PROP_SUPPORT_TEST")) {
zprop_desc_t *proptbl;
proptbl = zpool_prop_get_table();
proptbl[ZPOOL_PROP_COMMENT].pd_zfs_mod_supported = B_FALSE;
proptbl = zfs_prop_get_table();
proptbl[ZFS_PROP_DNODESIZE].pd_zfs_mod_supported = B_FALSE;
zfeature_info_t *ftbl = spa_feature_table;
ftbl[SPA_FEATURE_LARGE_BLOCKS].fi_zfs_mod_supported = B_FALSE;
}
return (hdl);
}
void
libzfs_fini(libzfs_handle_t *hdl)
{
(void) close(hdl->libzfs_fd);
zpool_free_handles(hdl);
namespace_clear(hdl);
libzfs_mnttab_fini(hdl);
libzfs_core_fini();
regfree(&hdl->libzfs_urire);
fletcher_4_fini();
#if LIBFETCH_DYNAMIC
if (hdl->libfetch != (void *)-1 && hdl->libfetch != NULL)
(void) dlclose(hdl->libfetch);
free(hdl->libfetch_load_error);
#endif
free(hdl);
}
libzfs_handle_t *
zpool_get_handle(zpool_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
libzfs_handle_t *
zfs_get_handle(zfs_handle_t *zhp)
{
return (zhp->zfs_hdl);
}
zpool_handle_t *
zfs_get_pool_handle(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
/*
* Given a name, determine whether or not it's a valid path
* (starts with '/' or "./"). If so, walk the mnttab trying
* to match the device number. If not, treat the path as an
* fs/vol/snap/bkmark name.
*/
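/*
 * For example, "./mnt/data" (a path under a mounted ZFS filesystem) is
 * resolved through the mnttab to the dataset mounted there, while
 * "pool/data" is simply opened as a name of type 'argtype'.
 */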
zfs_handle_t *
zfs_path_to_zhandle(libzfs_handle_t *hdl, const char *path, zfs_type_t argtype)
{
struct stat64 statbuf;
struct extmnttab entry;
if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
/*
* It's not a valid path, assume it's a name of type 'argtype'.
*/
return (zfs_open(hdl, path, argtype));
}
if (getextmntent(path, &entry, &statbuf) != 0)
return (NULL);
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
path);
return (NULL);
}
return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM));
}
/*
* Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from
* an ioctl().
*/
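/*
 * Callers typically pass len == 0 to get the default 256 KiB buffer; if the
 * ioctl() later fails with ENOMEM, zcmd_expand_dst_nvlist() below grows the
 * buffer to the size reported back by the kernel.
 */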
int
zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
{
if (len == 0)
len = 256 * 1024;
zc->zc_nvlist_dst_size = len;
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called when an ioctl() which returns an nvlist fails with ENOMEM. This will
* expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was
* filled in by the kernel to indicate the actual required size.
*/
int
zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called to free the src and dst nvlists stored in the command structure.
*/
void
zcmd_free_nvlists(zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_conf);
free((void *)(uintptr_t)zc->zc_nvlist_src);
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_conf = 0;
zc->zc_nvlist_src = 0;
zc->zc_nvlist_dst = 0;
}
static int
zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
nvlist_t *nvl)
{
char *packed;
size_t len;
verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);
if ((packed = zfs_alloc(hdl, len)) == NULL)
return (-1);
verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
*outnv = (uint64_t)(uintptr_t)packed;
*outlen = len;
return (0);
}
int
zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
&zc->zc_nvlist_conf_size, nvl));
}
int
zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
&zc->zc_nvlist_src_size, nvl));
}
/*
* Unpacks an nvlist from the ZFS ioctl command structure.
*/
int
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
{
if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
zc->zc_nvlist_dst_size, nvlp, 0) != 0)
return (no_memory(hdl));
return (0);
}
/*
* ================================================================
* API shared by zfs and zpool property management
* ================================================================
*/
static void
zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
{
zprop_list_t *pl = cbp->cb_proplist;
int i;
char *title;
size_t len;
cbp->cb_first = B_FALSE;
if (cbp->cb_scripted)
return;
/*
* Start with the length of the column headers.
*/
cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
"PROPERTY"));
cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
"VALUE"));
cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN,
"RECEIVED"));
cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
"SOURCE"));
/* first property is always NAME */
assert(cbp->cb_proplist->pl_prop ==
((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME : ZFS_PROP_NAME));
/*
* Go through and calculate the widths for each column. For the
* 'source' column, we kludge it up by taking the worst-case scenario of
* inheriting from the longest name. This is acceptable because in the
* majority of cases 'SOURCE' is the last column displayed, and we don't
* use the width anyway. Note that the 'VALUE' column can be oversized,
* if the name of the property is much longer than any values we find.
*/
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* 'PROPERTY' column
*/
if (pl->pl_prop != ZPROP_INVAL) {
const char *propname = (type == ZFS_TYPE_POOL) ?
zpool_prop_to_name(pl->pl_prop) :
zfs_prop_to_name(pl->pl_prop);
len = strlen(propname);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
} else {
len = strlen(pl->pl_user_prop);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
}
/*
* 'VALUE' column. The first property is always the 'name'
* property that was tacked on either by /sbin/zfs's
* zfs_do_get() or when calling zprop_expand_list(), so we
* ignore its width. If the user specified the name property
* to display, then it will be later in the list in any case.
*/
if (pl != cbp->cb_proplist &&
pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
/* 'RECEIVED' column. */
if (pl != cbp->cb_proplist &&
pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD])
cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width;
/*
* 'NAME' and 'SOURCE' columns
*/
if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME) &&
pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) {
cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
strlen(dgettext(TEXT_DOMAIN, "inherited from"));
}
}
/*
* Now go through and print the headers.
*/
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
title = dgettext(TEXT_DOMAIN, "NAME");
break;
case GET_COL_PROPERTY:
title = dgettext(TEXT_DOMAIN, "PROPERTY");
break;
case GET_COL_VALUE:
title = dgettext(TEXT_DOMAIN, "VALUE");
break;
case GET_COL_RECVD:
title = dgettext(TEXT_DOMAIN, "RECEIVED");
break;
case GET_COL_SOURCE:
title = dgettext(TEXT_DOMAIN, "SOURCE");
break;
default:
title = NULL;
}
if (title != NULL) {
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", title);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
title);
}
}
(void) printf("\n");
}
/*
* Display a single line of output, according to the settings in the callback
* structure.
*/
void
zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
const char *propname, const char *value, zprop_source_t sourcetype,
const char *source, const char *recvd_value)
{
int i;
const char *str = NULL;
char buf[128];
/*
* Ignore those source types that the user has chosen to ignore.
*/
if ((sourcetype & cbp->cb_sources) == 0)
return;
if (cbp->cb_first)
zprop_print_headers(cbp, cbp->cb_type);
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
str = name;
break;
case GET_COL_PROPERTY:
str = propname;
break;
case GET_COL_VALUE:
str = value;
break;
case GET_COL_SOURCE:
switch (sourcetype) {
case ZPROP_SRC_NONE:
str = "-";
break;
case ZPROP_SRC_DEFAULT:
str = "default";
break;
case ZPROP_SRC_LOCAL:
str = "local";
break;
case ZPROP_SRC_TEMPORARY:
str = "temporary";
break;
case ZPROP_SRC_INHERITED:
(void) snprintf(buf, sizeof (buf),
"inherited from %s", source);
str = buf;
break;
case ZPROP_SRC_RECEIVED:
str = "received";
break;
default:
str = NULL;
assert(!"unhandled zprop_source_t");
}
break;
case GET_COL_RECVD:
str = (recvd_value == NULL ? "-" : recvd_value);
break;
default:
continue;
}
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", str);
else if (cbp->cb_scripted)
(void) printf("%s\t", str);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
str);
}
(void) printf("\n");
}
/*
* Given a numeric suffix, convert the value into a number of bits that the
* resulting value must be shifted.
*/
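/*
 * For example, an empty suffix yields 0, 'K' (or "KB"/"KiB") yields 10, and
 * 'G' yields 30; an unrecognized suffix returns -1.
 */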
static int
str2shift(libzfs_handle_t *hdl, const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Allow 'G' = 'GB' = 'GiB', case-insensitively.
* However, 'BB' and 'BiB' are disallowed.
*/
if (buf[1] == '\0' ||
(toupper(buf[0]) != 'B' &&
((toupper(buf[1]) == 'B' && buf[2] == '\0') ||
(toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' &&
buf[3] == '\0'))))
return (10 * i);
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Convert a string of the form '100G' into a real number. Used when setting
* properties or creating a volume. On error, an extended error message is
* recorded via zfs_error_aux() for the caller to use.
*/
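/*
 * For example, "100G" becomes 100 * 2^30 = 107374182400 and "1.5K" becomes
 * 1536, while "16EB" is rejected as too large.
 */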
int
zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
{
char *end;
int shift;
*num = 0;
/* Check to see if this looks like a number. */
if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad numeric value '%s'"), value);
return (-1);
}
/* Rely on strtoull() to process the numeric portion. */
errno = 0;
*num = strtoull(value, &end, 10);
/*
* Check for ERANGE, which indicates that the value is too large to fit
* in a 64-bit value.
*/
if (errno == ERANGE) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
/*
* If we have a decimal value, then do the computation with floating
* point arithmetic. Otherwise, use standard arithmetic.
*/
if (*end == '.') {
double fval = strtod(value, &end);
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
fval *= pow(2, shift);
/*
* UINT64_MAX is not exactly representable as a double.
* The closest representation is UINT64_MAX + 1, so we
* use a >= comparison instead of > for the bounds check.
*/
if (fval >= (double)UINT64_MAX) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num = (uint64_t)fval;
} else {
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
/* Check for overflow */
if (shift >= 64 || (*num << shift) >> shift != *num) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num <<= shift;
}
return (0);
}
/*
* Given a propname=value nvpair to set, parse any numeric properties
* (index, boolean, etc) if they are specified as strings and add the
* resulting nvpair to the returned nvlist.
*
* At the DSL layer, all properties are either 64-bit numbers or strings.
* We want the user to be able to ignore this fact and specify properties
* as native values (numbers, for example) or as strings (to simplify
* command line utilities). This also handles converting index types
* (compression, checksum, etc) from strings to their on-disk index.
*/
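/*
 * For example, a string nvpair "quota=10G" is converted to the uint64 value
 * 10737418240, while an index property such as "compression=on" is converted
 * to its on-disk index value.
 */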
int
zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp,
const char *errbuf)
{
data_type_t datatype = nvpair_type(elem);
zprop_type_t proptype;
const char *propname;
char *value;
boolean_t isnone = B_FALSE;
boolean_t isauto = B_FALSE;
int err = 0;
if (type == ZFS_TYPE_POOL) {
proptype = zpool_prop_get_type(prop);
propname = zpool_prop_to_name(prop);
} else {
proptype = zfs_prop_get_type(prop);
propname = zfs_prop_to_name(prop);
}
/*
* Convert any properties to the internal DSL value types.
*/
*svalp = NULL;
*ivalp = 0;
switch (proptype) {
case PROP_TYPE_STRING:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
err = nvpair_value_string(elem, svalp);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is invalid"), nvpair_name(elem));
goto error;
}
if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is too long"), nvpair_name(elem));
goto error;
}
break;
case PROP_TYPE_NUMBER:
if (datatype == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &value);
if (strcmp(value, "none") == 0) {
isnone = B_TRUE;
} else if (strcmp(value, "auto") == 0) {
isauto = B_TRUE;
} else if (zfs_nicestrtonum(hdl, value, ivalp) != 0) {
goto error;
}
} else if (datatype == DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, ivalp);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), nvpair_name(elem));
goto error;
}
/*
* Quota special: force 'none' and don't allow 0.
*/
if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
(prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable quota/refquota"));
goto error;
}
/*
* Special handling for "*_limit=none". In this case it's not
* 0 but UINT64_MAX.
*/
if ((type & ZFS_TYPE_DATASET) && isnone &&
(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT)) {
*ivalp = UINT64_MAX;
}
/*
* Special handling for setting 'refreservation' to 'auto'. Use
* UINT64_MAX to tell the caller to use zfs_fix_auto_resv().
* 'auto' is only allowed on volumes.
*/
if (isauto) {
switch (prop) {
case ZFS_PROP_REFRESERVATION:
if ((type & ZFS_TYPE_VOLUME) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s=auto' only allowed on "
"volumes"), nvpair_name(elem));
goto error;
}
*ivalp = UINT64_MAX;
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'auto' is invalid value for '%s'"),
nvpair_name(elem));
goto error;
}
}
break;
case PROP_TYPE_INDEX:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
(void) nvpair_value_string(elem, &value);
if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be one of '%s'"), propname,
zprop_values(prop, type));
goto error;
}
break;
default:
abort();
}
/*
* Add the result to our return set of properties.
*/
if (*svalp != NULL) {
if (nvlist_add_string(ret, propname, *svalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
} else {
if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
}
return (0);
error:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
return (-1);
}
static int
addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp,
zfs_type_t type)
{
int prop;
zprop_list_t *entry;
prop = zprop_name_to_prop(propname, type);
if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type, B_FALSE))
prop = ZPROP_INVAL;
/*
* When no property table entry can be found, return failure if
* this is a pool property or if this isn't a user-defined
* dataset property.
*/
if (prop == ZPROP_INVAL && ((type == ZFS_TYPE_POOL &&
!zpool_prop_feature(propname) &&
!zpool_prop_unsupported(propname)) ||
(type == ZFS_TYPE_DATASET && !zfs_prop_user(propname) &&
!zfs_prop_userquota(propname) && !zfs_prop_written(propname)))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = prop;
if (prop == ZPROP_INVAL) {
if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) ==
NULL) {
free(entry);
return (-1);
}
entry->pl_width = strlen(propname);
} else {
entry->pl_width = zprop_width(prop, &entry->pl_fixed,
type);
}
*listp = entry;
return (0);
}
/*
* Given a comma-separated list of properties, construct a property list
* containing both user-defined and native properties. This function will
* return a NULL list if 'all' is specified, which can later be expanded
* by zprop_expand_list().
*/
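/*
 * For example, "name,used,com.example:tag" (the last being a hypothetical
 * user property) yields a three-entry list, and the shorthand "space" expands
 * to name, avail, used, usedbysnapshots, usedbydataset, usedbyrefreservation
 * and usedbychildren.
 */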
int
zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
zfs_type_t type)
{
*listp = NULL;
/*
* If 'all' is specified, return a NULL list.
*/
if (strcmp(props, "all") == 0)
return (0);
/*
* If no props were specified, return an error.
*/
if (props[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no properties specified"));
return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
"bad property list")));
}
/*
* It would be nice to use getsubopt() here, but the inclusion of column
* aliases makes this more effort than it's worth.
*/
while (*props != '\0') {
size_t len;
char *p;
char c;
if ((p = strchr(props, ',')) == NULL) {
len = strlen(props);
p = props + len;
} else {
len = p - props;
}
/*
* Check for empty options.
*/
if (len == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty property name"));
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
/*
* Check all regular property names.
*/
c = props[len];
props[len] = '\0';
if (strcmp(props, "space") == 0) {
static char *spaceprops[] = {
"name", "avail", "used", "usedbysnapshots",
"usedbydataset", "usedbyrefreservation",
"usedbychildren", NULL
};
int i;
for (i = 0; spaceprops[i]; i++) {
if (addlist(hdl, spaceprops[i], listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
} else {
if (addlist(hdl, props, listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
props = p;
if (c == ',')
props++;
}
return (0);
}
void
zprop_free_list(zprop_list_t *pl)
{
zprop_list_t *next;
while (pl != NULL) {
next = pl->pl_next;
free(pl->pl_user_prop);
free(pl);
pl = next;
}
}
typedef struct expand_data {
zprop_list_t **last;
libzfs_handle_t *hdl;
zfs_type_t type;
} expand_data_t;
static int
zprop_expand_list_cb(int prop, void *cb)
{
zprop_list_t *entry;
expand_data_t *edp = cb;
if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL)
return (ZPROP_INVAL);
entry->pl_prop = prop;
entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
entry->pl_all = B_TRUE;
*(edp->last) = entry;
edp->last = &entry->pl_next;
return (ZPROP_CONT);
}
int
zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
{
zprop_list_t *entry;
zprop_list_t **last;
expand_data_t exp;
if (*plp == NULL) {
/*
* If this is the very first time we've been called for an 'all'
* specification, expand the list to include all native
* properties.
*/
last = plp;
exp.last = last;
exp.hdl = hdl;
exp.type = type;
if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
B_FALSE, type) == ZPROP_INVAL)
return (-1);
/*
* Add 'name' to the beginning of the list, which is handled
* specially.
*/
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = (type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME;
entry->pl_width = zprop_width(entry->pl_prop,
&entry->pl_fixed, type);
entry->pl_all = B_TRUE;
entry->pl_next = *plp;
*plp = entry;
}
return (0);
}
int
zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
zfs_type_t type)
{
return (zprop_iter_common(func, cb, show_all, ordered, type));
}
/*
* Fill given version buffer with zfs userland version
*/
void
zfs_version_userland(char *version, int len)
{
(void) strlcpy(version, ZFS_META_ALIAS, len);
}
/*
* Prints both zfs userland and kernel versions
* Returns 0 on success, and -1 on error (with errno set)
*/
int
zfs_version_print(void)
{
char zver_userland[128];
char zver_kernel[128];
zfs_version_userland(zver_userland, sizeof (zver_userland));
(void) printf("%s\n", zver_userland);
if (zfs_version_kernel(zver_kernel, sizeof (zver_kernel)) == -1) {
fprintf(stderr, "zfs_version_kernel() failed: %s\n",
strerror(errno));
return (-1);
}
(void) printf("zfs-kmod-%s\n", zver_kernel);
return (0);
}
/*
* Return 1 if the user requested ANSI color output, and our terminal supports
* it. Return 0 for no color.
*/
static int
use_color(void)
{
static int use_color = -1;
char *term;
/*
* Optimization:
*
* For each zpool invocation, we do a single check to see if we should
* be using color or not, and cache that value for the lifetime of the
* zpool command. That makes it cheap to call use_color() when
* we're printing with color. We assume that the settings are not going
* to change during the invocation of a zpool command (the user isn't
* going to change the ZFS_COLOR value while zpool is running, for
* example).
*/
if (use_color != -1) {
/*
* We've already figured out if we should be using color or
* not. Return the cached value.
*/
return (use_color);
}
term = getenv("TERM");
/*
* The user sets the ZFS_COLOR env var to enable zpool ANSI color
* output. However, if NO_COLOR is set (https://no-color.org/) then
* don't use it. Also, don't use color if the terminal doesn't
* support it.
*/
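/*
 * For example, running "ZFS_COLOR=1 zpool status" on a capable terminal
 * enables colored output, while additionally setting NO_COLOR disables it
 * again.
 */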
if (libzfs_envvar_is_set("ZFS_COLOR") &&
!libzfs_envvar_is_set("NO_COLOR") &&
isatty(STDOUT_FILENO) && term && strcmp("dumb", term) != 0 &&
strcmp("unknown", term) != 0) {
/* Color supported */
use_color = 1;
} else {
use_color = 0;
}
return (use_color);
}
/*
* color_start() and color_end() are used for when you want to colorize a block
* of text. For example:
*
* color_start(ANSI_RED_FG)
* printf("hello");
* printf("world");
* color_end();
*/
void
color_start(char *color)
{
if (use_color())
printf("%s", color);
}
void
color_end(void)
{
if (use_color())
printf(ANSI_RESET);
}
/* printf() with a color. If color is NULL, then do a normal printf. */
int
printf_color(char *color, char *format, ...)
{
va_list aptr;
int rc;
if (color)
color_start(color);
va_start(aptr, format);
rc = vprintf(format, aptr);
va_end(aptr);
if (color)
color_end();
return (rc);
}
diff --git a/sys/contrib/openzfs/lib/libzfs/os/freebsd/libzfs_zmount.c b/sys/contrib/openzfs/lib/libzfs/os/freebsd/libzfs_zmount.c
index 6bc073cb03b9..12317fdde38e 100644
--- a/sys/contrib/openzfs/lib/libzfs/os/freebsd/libzfs_zmount.c
+++ b/sys/contrib/openzfs/lib/libzfs/os/freebsd/libzfs_zmount.c
@@ -1,135 +1,147 @@
/*
* Copyright (c) 2006 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
/*
* This file implements Solaris compatible zmount() function.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/mount.h>
#include <sys/uio.h>
#include <sys/mntent.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mnttab.h>
#include <sys/errno.h>
#include <libzfs.h>
#include "../../libzfs_impl.h"
static void
build_iovec(struct iovec **iov, int *iovlen, const char *name, void *val,
size_t len)
{
int i;
if (*iovlen < 0)
return;
i = *iovlen;
*iov = realloc(*iov, sizeof (**iov) * (i + 2));
if (*iov == NULL) {
*iovlen = -1;
return;
}
(*iov)[i].iov_base = strdup(name);
(*iov)[i].iov_len = strlen(name) + 1;
i++;
(*iov)[i].iov_base = val;
if (len == (size_t)-1) {
if (val != NULL)
len = strlen(val) + 1;
else
len = 0;
}
(*iov)[i].iov_len = (int)len;
*iovlen = ++i;
}
static int
do_mount_(const char *spec, const char *dir, int mflag, char *fstype,
char *dataptr, int datalen, char *optptr, int optlen)
{
struct iovec *iov;
char *optstr, *p, *tofree;
int iovlen, rv;
assert(spec != NULL);
assert(dir != NULL);
assert(fstype != NULL);
assert(strcmp(fstype, MNTTYPE_ZFS) == 0);
assert(dataptr == NULL);
assert(datalen == 0);
assert(optptr != NULL);
assert(optlen > 0);
tofree = optstr = strdup(optptr);
assert(optstr != NULL);
iov = NULL;
iovlen = 0;
if (strstr(optstr, MNTOPT_REMOUNT) != NULL)
build_iovec(&iov, &iovlen, "update", NULL, 0);
if (mflag & MS_RDONLY)
build_iovec(&iov, &iovlen, "ro", NULL, 0);
build_iovec(&iov, &iovlen, "fstype", fstype, (size_t)-1);
build_iovec(&iov, &iovlen, "fspath", __DECONST(char *, dir),
(size_t)-1);
build_iovec(&iov, &iovlen, "from", __DECONST(char *, spec), (size_t)-1);
while ((p = strsep(&optstr, ",/")) != NULL)
build_iovec(&iov, &iovlen, p, NULL, (size_t)-1);
rv = nmount(iov, iovlen, 0);
free(tofree);
if (rv < 0)
return (errno);
return (rv);
}
int
do_mount(zfs_handle_t *zhp, const char *mntpt, char *opts, int flags)
{
return (do_mount_(zfs_get_name(zhp), mntpt, flags, MNTTYPE_ZFS, NULL, 0,
opts, sizeof (mntpt)));
}
int
do_unmount(zfs_handle_t *zhp, const char *mntpt, int flags)
{
if (unmount(mntpt, flags) < 0)
return (errno);
return (0);
}
int
zfs_mount_delegation_check(void)
{
return (0);
}
+
+/* Called from the tail end of zpool_disable_datasets() */
+void
+zpool_disable_datasets_os(zpool_handle_t *zhp, boolean_t force)
+{
+}
+
+/* Called from the tail end of zfs_unmount() */
+void
+zpool_disable_volume_os(const char *name)
+{
+}
diff --git a/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c b/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c
index 42f300b36c91..29fea736b605 100644
--- a/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c
+++ b/sys/contrib/openzfs/lib/libzfs/os/linux/libzfs_mount_os.c
@@ -1,413 +1,425 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2021 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017 RackTop Systems.
* Copyright (c) 2018 Datto Inc.
* Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
*/
#include <dirent.h>
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <zone.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/vfs.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include "../../libzfs_impl.h"
#include <thread_pool.h>
#define ZS_COMMENT 0x00000000 /* comment */
#define ZS_ZFSUTIL 0x00000001 /* caller is zfs(8) */
typedef struct option_map {
const char *name;
unsigned long mntmask;
unsigned long zfsmask;
} option_map_t;
static const option_map_t option_map[] = {
/* Canonicalized filesystem independent options from mount(8) */
{ MNTOPT_NOAUTO, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_DEFAULTS, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NODEVICES, MS_NODEV, ZS_COMMENT },
{ MNTOPT_DEVICES, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_DIRSYNC, MS_DIRSYNC, ZS_COMMENT },
{ MNTOPT_NOEXEC, MS_NOEXEC, ZS_COMMENT },
{ MNTOPT_EXEC, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_GROUP, MS_GROUP, ZS_COMMENT },
{ MNTOPT_NETDEV, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NOFAIL, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NOSUID, MS_NOSUID, ZS_COMMENT },
{ MNTOPT_SUID, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_OWNER, MS_OWNER, ZS_COMMENT },
{ MNTOPT_REMOUNT, MS_REMOUNT, ZS_COMMENT },
{ MNTOPT_RO, MS_RDONLY, ZS_COMMENT },
{ MNTOPT_RW, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_SYNC, MS_SYNCHRONOUS, ZS_COMMENT },
{ MNTOPT_USER, MS_USERS, ZS_COMMENT },
{ MNTOPT_USERS, MS_USERS, ZS_COMMENT },
/* acl flags passed with util-linux-2.24 mount command */
{ MNTOPT_ACL, MS_POSIXACL, ZS_COMMENT },
{ MNTOPT_NOACL, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_POSIXACL, MS_POSIXACL, ZS_COMMENT },
#ifdef MS_NOATIME
{ MNTOPT_NOATIME, MS_NOATIME, ZS_COMMENT },
{ MNTOPT_ATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_NODIRATIME
{ MNTOPT_NODIRATIME, MS_NODIRATIME, ZS_COMMENT },
{ MNTOPT_DIRATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_RELATIME
{ MNTOPT_RELATIME, MS_RELATIME, ZS_COMMENT },
{ MNTOPT_NORELATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_STRICTATIME
{ MNTOPT_STRICTATIME, MS_STRICTATIME, ZS_COMMENT },
{ MNTOPT_NOSTRICTATIME, MS_COMMENT, ZS_COMMENT },
#endif
#ifdef MS_LAZYTIME
{ MNTOPT_LAZYTIME, MS_LAZYTIME, ZS_COMMENT },
#endif
{ MNTOPT_CONTEXT, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_FSCONTEXT, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_DEFCONTEXT, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_ROOTCONTEXT, MS_COMMENT, ZS_COMMENT },
#ifdef MS_I_VERSION
{ MNTOPT_IVERSION, MS_I_VERSION, ZS_COMMENT },
#endif
#ifdef MS_MANDLOCK
{ MNTOPT_NBMAND, MS_MANDLOCK, ZS_COMMENT },
{ MNTOPT_NONBMAND, MS_COMMENT, ZS_COMMENT },
#endif
/* Valid options not found in mount(8) */
{ MNTOPT_BIND, MS_BIND, ZS_COMMENT },
#ifdef MS_REC
{ MNTOPT_RBIND, MS_BIND|MS_REC, ZS_COMMENT },
#endif
{ MNTOPT_COMMENT, MS_COMMENT, ZS_COMMENT },
#ifdef MS_NOSUB
{ MNTOPT_NOSUB, MS_NOSUB, ZS_COMMENT },
#endif
#ifdef MS_SILENT
{ MNTOPT_QUIET, MS_SILENT, ZS_COMMENT },
#endif
/* Custom zfs options */
{ MNTOPT_XATTR, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_NOXATTR, MS_COMMENT, ZS_COMMENT },
{ MNTOPT_ZFSUTIL, MS_COMMENT, ZS_ZFSUTIL },
{ NULL, 0, 0 } };
/*
* Break the mount option into a name/value pair. The name is
* validated against the option map and the mount flags are set accordingly.
*/
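/*
 * For example, "noatime" maps to MS_NOATIME and "zfsutil" sets ZS_ZFSUTIL in
 * the zfs flags; an option not found in the map returns ENOENT unless sloppy
 * parsing was requested.
 */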
static int
parse_option(char *mntopt, unsigned long *mntflags,
unsigned long *zfsflags, int sloppy)
{
const option_map_t *opt;
char *ptr, *name, *value = NULL;
int error = 0;
name = strdup(mntopt);
if (name == NULL)
return (ENOMEM);
for (ptr = name; ptr && *ptr; ptr++) {
if (*ptr == '=') {
*ptr = '\0';
value = ptr+1;
VERIFY3P(value, !=, NULL);
break;
}
}
for (opt = option_map; opt->name != NULL; opt++) {
if (strncmp(name, opt->name, strlen(name)) == 0) {
*mntflags |= opt->mntmask;
*zfsflags |= opt->zfsmask;
error = 0;
goto out;
}
}
if (!sloppy)
error = ENOENT;
out:
/* If required, further processing of the value may be done here */
free(name);
return (error);
}
/*
* Translate the mount option string into MS_* mount flags for the
* kernel vfs. When sloppy is non-zero, unknown options are ignored;
* otherwise they are considered fatal and are copied into badopt.
*/
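/*
 * For example, mntopts = "ro,noatime,zfsutil" sets MS_RDONLY and MS_NOATIME
 * in *mntflags and ZS_ZFSUTIL in *zfsflags, and only "ro,noatime" is copied
 * into mtabopt.
 */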
int
zfs_parse_mount_options(char *mntopts, unsigned long *mntflags,
unsigned long *zfsflags, int sloppy, char *badopt, char *mtabopt)
{
int error = 0, quote = 0, flag = 0, count = 0;
char *ptr, *opt, *opts;
opts = strdup(mntopts);
if (opts == NULL)
return (ENOMEM);
*mntflags = 0;
opt = NULL;
/*
* Scan through all mount options which must be comma delimited.
* We must be careful to notice regions which are double quoted
* and skip commas in these regions. Each option is then checked
* to determine if it is a known option.
*/
for (ptr = opts; ptr && !flag; ptr++) {
if (opt == NULL)
opt = ptr;
if (*ptr == '"')
quote = !quote;
if (quote)
continue;
if (*ptr == '\0')
flag = 1;
if ((*ptr == ',') || (*ptr == '\0')) {
*ptr = '\0';
error = parse_option(opt, mntflags, zfsflags, sloppy);
if (error) {
strcpy(badopt, opt);
goto out;
}
if (!(*mntflags & MS_REMOUNT) &&
!(*zfsflags & ZS_ZFSUTIL) &&
mtabopt != NULL) {
if (count > 0)
strlcat(mtabopt, ",", MNT_LINE_MAX);
strlcat(mtabopt, opt, MNT_LINE_MAX);
count++;
}
opt = NULL;
}
}
out:
free(opts);
return (error);
}
static void
append_mntopt(const char *name, const char *val, char *mntopts,
char *mtabopt, boolean_t quote)
{
char tmp[MNT_LINE_MAX];
snprintf(tmp, MNT_LINE_MAX, quote ? ",%s=\"%s\"" : ",%s=%s", name, val);
if (mntopts)
strlcat(mntopts, tmp, MNT_LINE_MAX);
if (mtabopt)
strlcat(mtabopt, tmp, MNT_LINE_MAX);
}
static void
zfs_selinux_setcontext(zfs_handle_t *zhp, zfs_prop_t zpt, const char *name,
char *mntopts, char *mtabopt)
{
char context[ZFS_MAXPROPLEN];
if (zfs_prop_get(zhp, zpt, context, sizeof (context),
NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(context, "none") != 0)
append_mntopt(name, context, mntopts, mtabopt, B_TRUE);
}
}
void
zfs_adjust_mount_options(zfs_handle_t *zhp, const char *mntpoint,
char *mntopts, char *mtabopt)
{
char prop[ZFS_MAXPROPLEN];
/*
* Check whether the ZFS_PROP_SELINUX_CONTEXT property is set. If it is
* set to the default ("none"), allow the more specific context
* properties (fscontext, defcontext, rootcontext) to take effect; this
* is needed because the 'context' property overrides the others. If it
* is not the default, set the 'context' mount option from it.
*/
if (zfs_prop_get(zhp, ZFS_PROP_SELINUX_CONTEXT, prop, sizeof (prop),
NULL, NULL, 0, B_FALSE) == 0) {
if (strcmp(prop, "none") == 0) {
zfs_selinux_setcontext(zhp, ZFS_PROP_SELINUX_FSCONTEXT,
MNTOPT_FSCONTEXT, mntopts, mtabopt);
zfs_selinux_setcontext(zhp, ZFS_PROP_SELINUX_DEFCONTEXT,
MNTOPT_DEFCONTEXT, mntopts, mtabopt);
zfs_selinux_setcontext(zhp,
ZFS_PROP_SELINUX_ROOTCONTEXT, MNTOPT_ROOTCONTEXT,
mntopts, mtabopt);
} else {
append_mntopt(MNTOPT_CONTEXT, prop,
mntopts, mtabopt, B_TRUE);
}
}
/* A hint used to determine an auto-mounted snapshot mount point */
append_mntopt(MNTOPT_MNTPOINT, mntpoint, mntopts, NULL, B_FALSE);
}
/*
* By default the filesystem is mounted by preparing the mount options (i.e.
* parsing some flags from the "opts" parameter into the "flags" parameter)
* and then directly calling the mount(2) system call. We don't need the
* mount utility or to update /etc/mtab, because it is a symlink on all
* modern systems.
*
* If the environment variable ZFS_MOUNT_HELPER is set, we fall back to the
* previous behavior:
* The filesystem is mounted by invoking the system mount utility rather
* than by the system call mount(2). This ensures that the /etc/mtab
* file is correctly locked for the update. Performing our own locking
* and /etc/mtab update requires making an unsafe assumption about how
* the mount utility performs its locking. Unfortunately, this also means
* in the case of a mount failure we do not have the exact errno. We must
* make do with the return value from the mount process.
*/
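/*
 * For example, "ZFS_MOUNT_HELPER=1 zfs mount pool/fs" forces the legacy
 * /bin/mount code path below, whereas by default mount(2) is called directly.
 */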
int
do_mount(zfs_handle_t *zhp, const char *mntpt, char *opts, int flags)
{
const char *src = zfs_get_name(zhp);
int error = 0;
if (!libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
char badopt[MNT_LINE_MAX] = {0};
unsigned long mntflags = flags, zfsflags;
char myopts[MNT_LINE_MAX] = {0};
if (zfs_parse_mount_options(opts, &mntflags,
&zfsflags, 0, badopt, NULL)) {
return (EINVAL);
}
strlcat(myopts, opts, MNT_LINE_MAX);
zfs_adjust_mount_options(zhp, mntpt, myopts, NULL);
if (mount(src, mntpt, MNTTYPE_ZFS, mntflags, myopts)) {
return (errno);
}
} else {
char *argv[9] = {
"/bin/mount",
"--no-canonicalize",
"-t", MNTTYPE_ZFS,
"-o", opts,
(char *)src,
(char *)mntpt,
(char *)NULL };
/* Return only the most critical mount error */
error = libzfs_run_process(argv[0], argv,
STDOUT_VERBOSE|STDERR_VERBOSE);
if (error) {
if (error & MOUNT_FILEIO) {
error = EIO;
} else if (error & MOUNT_USER) {
error = EINTR;
} else if (error & MOUNT_SOFTWARE) {
error = EPIPE;
} else if (error & MOUNT_BUSY) {
error = EBUSY;
} else if (error & MOUNT_SYSERR) {
error = EAGAIN;
} else if (error & MOUNT_USAGE) {
error = EINVAL;
} else
error = ENXIO; /* Generic error */
}
}
return (error);
}
int
do_unmount(zfs_handle_t *zhp, const char *mntpt, int flags)
{
if (!libzfs_envvar_is_set("ZFS_MOUNT_HELPER")) {
int rv = umount2(mntpt, flags);
return (rv < 0 ? errno : 0);
}
char force_opt[] = "-f";
char lazy_opt[] = "-l";
char *argv[7] = {
"/bin/umount",
"-t", MNTTYPE_ZFS,
NULL, NULL, NULL, NULL };
int rc, count = 3;
if (flags & MS_FORCE) {
argv[count] = force_opt;
count++;
}
if (flags & MS_DETACH) {
argv[count] = lazy_opt;
count++;
}
argv[count] = (char *)mntpt;
rc = libzfs_run_process(argv[0], argv, STDOUT_VERBOSE|STDERR_VERBOSE);
return (rc ? EINVAL : 0);
}
int
zfs_mount_delegation_check(void)
{
return ((geteuid() != 0) ? EACCES : 0);
}
+
+/* Called from the tail end of zpool_disable_datasets() */
+void
+zpool_disable_datasets_os(zpool_handle_t *zhp, boolean_t force)
+{
+}
+
+/* Called from the tail end of zfs_unmount() */
+void
+zpool_disable_volume_os(const char *name)
+{
+}
diff --git a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
index 1b018dd063e0..6abec9a8ab5b 100644
--- a/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
+++ b/sys/contrib/openzfs/lib/libzfs_core/libzfs_core.abi
@@ -1,3685 +1,3061 @@
<abi-corpus architecture='elf-amd-x86_64' soname='libzfs_core.so.3'>
<elf-needed>
<dependency name='libuuid.so.1'/>
<dependency name='libz.so.1'/>
<dependency name='librt.so.1'/>
<dependency name='libm.so.6'/>
<dependency name='libblkid.so.1'/>
<dependency name='libudev.so.1'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libpthread.so.0'/>
<dependency name='libc.so.6'/>
<dependency name='ld-linux-x86-64.so.2'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_sol_getmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_add_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_and_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_cas_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_clear_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_dec_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_inc_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uchar_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_uint_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ulong_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_or_ushort_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_set_long_excl' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_16_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_32_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_64_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_8_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_char_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_int_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_long_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_ptr_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_sub_short_nv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_16' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_32' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_64' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_8' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ptr' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uchar' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_uint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ulong' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='atomic_swap_ushort' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_add' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_destroy_nodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_find' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_first' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_insert_here' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_last' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_nearest' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_numnodes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_swap' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_gt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_update_lt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='avl_walk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_alloc_and_read' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_err_check' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_rescan' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_type' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_use_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='efi_write' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='get_system_hostid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getexecname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getextmntent' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getmntany' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='getzoneid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='is_mpath_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assertf' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_core_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libzfs_core_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_after' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_before' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_insert_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_is_empty' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_active' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_link_replace' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_move_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_next' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_prev' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_head' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_remove_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='list_tail' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_bookmark' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_change_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_channel_program' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_channel_program_nosync' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_clone' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_destroy_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_destroy_snaps' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_exists' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_bookmark_props' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_bookmarks' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_get_holds' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_hold' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_initialize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_load_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_pool_checkpoint' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_pool_checkpoint_discard' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_promote' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_one' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_resumable' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_with_cmdprops' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_receive_with_header' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_redact' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_release' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_rename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_reopen' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_rollback' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_rollback_to' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_redacted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_resume_redacted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_space' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_send_space_resume_redacted' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_set_bootenv' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_snaprange_space' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_snapshot' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_sync' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_trim' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_unload_key' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_wait_fs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzc_wait_tag' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_consumer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_enter' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_exit' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='membar_producer' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='mkdirp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='print_timestamp' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='spl_pagesize' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcat' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='strlcpy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_abandon' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_create' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_destroy' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_dispatch' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_member' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_resume' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspend' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_suspended' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='tpool_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='update_vdev_config_dev_strs' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_append_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_basename' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_flush' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_dm' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dev_is_whole_disk' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_devid' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_device_get_physical' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_dirnamelen' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_enclosure_sysfs_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_get_underlying_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_ioctl_fd' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_isnumber' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicebytes' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicenum_format' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_niceraw' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_nicetime' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_resolve_shortname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strcmp_pathname' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_partition' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zfs_strip_path' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_default_search_paths' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_dump_ddt' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_find_config' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_history_unpack' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_label_disk_wait' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_read_label' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='zpool_search_import' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
<elf-variable-symbols>
<elf-symbol name='efi_debug' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='libspl_assert_ok' size='4' type='object-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-variable-symbols>
- <abi-instr version='1.0' address-size='64' path='libzfs_core.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfs_core' language='LANG_C99'>
- <type-decl name='int' size-in-bits='32' id='type-id-1'/>
- <type-decl name='char' size-in-bits='8' id='type-id-2'/>
- <qualified-type-def type-id='type-id-2' const='yes' id='type-id-3'/>
- <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-4'/>
- <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-5'>
+ <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='bf311473' size-in-bits='128' id='f0f65199'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <typedef-decl name='avl_tree_t' type-id='b351119f' id='f20fbd51'/>
+ <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='b351119f'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='nvl_version' type-id='type-id-6' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='nvl_nvflag' type-id='type-id-7' visibility='default'/>
+ <var-decl name='avl_root' type-id='bf311473' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='nvl_priv' type-id='type-id-8' visibility='default'/>
+ <var-decl name='avl_compar' type-id='585e1de9' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='nvl_flag' type-id='type-id-7' visibility='default'/>
+ <var-decl name='avl_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='nvl_pad' type-id='type-id-6' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='avl_numnodes' type-id='ee1f298e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='avl_pad' type-id='b59d7dce' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__int32_t' type-id='type-id-1' id='type-id-9'/>
- <typedef-decl name='int32_t' type-id='type-id-9' id='type-id-6'/>
- <type-decl name='unsigned int' size-in-bits='32' id='type-id-10'/>
- <typedef-decl name='__uint32_t' type-id='type-id-10' id='type-id-11'/>
- <typedef-decl name='uint32_t' type-id='type-id-11' id='type-id-7'/>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-12'/>
- <typedef-decl name='__uint64_t' type-id='type-id-12' id='type-id-13'/>
- <typedef-decl name='uint64_t' type-id='type-id-13' id='type-id-8'/>
- <typedef-decl name='nvlist_t' type-id='type-id-5' id='type-id-14'/>
- <pointer-type-def type-id='type-id-14' size-in-bits='64' id='type-id-15'/>
- <pointer-type-def type-id='type-id-15' size-in-bits='64' id='type-id-16'/>
- <function-decl name='lzc_get_bootenv' mangled-name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bootenv'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-16' name='outnvl'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-14' const='yes' id='type-id-17'/>
- <pointer-type-def type-id='type-id-17' size-in-bits='64' id='type-id-18'/>
- <function-decl name='lzc_set_bootenv' mangled-name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_set_bootenv'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-18' name='env'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-19'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-20'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
- <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
- </enum-decl>
- <typedef-decl name='zfs_wait_activity_t' type-id='type-id-20' id='type-id-21'/>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-22'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='B_FALSE' value='0'/>
- <enumerator name='B_TRUE' value='1'/>
- </enum-decl>
- <typedef-decl name='boolean_t' type-id='type-id-22' id='type-id-23'/>
- <pointer-type-def type-id='type-id-23' size-in-bits='64' id='type-id-24'/>
- <function-decl name='lzc_wait_fs' mangled-name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_fs'>
- <parameter type-id='type-id-4' name='fs'/>
- <parameter type-id='type-id-21' name='activity'/>
- <parameter type-id='type-id-24' name='waited'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-25'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
- <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
- <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
- <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
- <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
- <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
- <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
- <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
- <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
- </enum-decl>
- <typedef-decl name='zpool_wait_activity_t' type-id='type-id-25' id='type-id-26'/>
- <function-decl name='lzc_wait_tag' mangled-name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_tag'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-26' name='activity'/>
- <parameter type-id='type-id-8' name='tag'/>
- <parameter type-id='type-id-24' name='waited'/>
- <return type-id='type-id-1'/>
+ <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='428b67b3'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='avl_child' type-id='f0f65199' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='avl_pcb' type-id='e475ab95' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='uintptr_t' type-id='7359adad' id='e475ab95'/>
+ <typedef-decl name='ulong_t' type-id='7359adad' id='ee1f298e'/>
+ <typedef-decl name='avl_index_t' type-id='e475ab95' id='fba6cb51'/>
+ <pointer-type-def type-id='fba6cb51' size-in-bits='64' id='32adbf30'/>
+ <pointer-type-def type-id='428b67b3' size-in-bits='64' id='bf311473'/>
+ <pointer-type-def type-id='f20fbd51' size-in-bits='64' id='a3681dea'/>
+ <pointer-type-def type-id='96ee24a5' size-in-bits='64' id='585e1de9'/>
+ <pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
+ <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='63e171df' name='cookie'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='lzc_wait' mangled-name='lzc_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-26' name='activity'/>
- <parameter type-id='type-id-24' name='waited'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='lzc_redact' mangled-name='lzc_redact' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_redact'>
- <parameter type-id='type-id-4' name='snapshot'/>
- <parameter type-id='type-id-4' name='bookname'/>
- <parameter type-id='type-id-15' name='snapnv'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <enum-decl name='pool_trim_func' id='type-id-27'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='POOL_TRIM_START' value='0'/>
- <enumerator name='POOL_TRIM_CANCEL' value='1'/>
- <enumerator name='POOL_TRIM_SUSPEND' value='2'/>
- <enumerator name='POOL_TRIM_FUNCS' value='3'/>
- </enum-decl>
- <typedef-decl name='pool_trim_func_t' type-id='type-id-27' id='type-id-28'/>
- <function-decl name='lzc_trim' mangled-name='lzc_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_trim'>
- <parameter type-id='type-id-4' name='poolname'/>
- <parameter type-id='type-id-28' name='cmd_type'/>
- <parameter type-id='type-id-8' name='rate'/>
- <parameter type-id='type-id-23' name='secure'/>
- <parameter type-id='type-id-15' name='vdevs'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <enum-decl name='pool_initialize_func' id='type-id-29'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='POOL_INITIALIZE_START' value='0'/>
- <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
- <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
- <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
- </enum-decl>
- <typedef-decl name='pool_initialize_func_t' type-id='type-id-29' id='type-id-30'/>
- <function-decl name='lzc_initialize' mangled-name='lzc_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_initialize'>
- <parameter type-id='type-id-4' name='poolname'/>
- <parameter type-id='type-id-30' name='cmd_type'/>
- <parameter type-id='type-id-15' name='vdevs'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_reopen' mangled-name='lzc_reopen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_reopen'>
- <parameter type-id='type-id-4' name='pool_name'/>
- <parameter type-id='type-id-23' name='scrub_restart'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='unsigned char' size-in-bits='8' id='type-id-31'/>
- <typedef-decl name='__uint8_t' type-id='type-id-31' id='type-id-32'/>
- <typedef-decl name='uint8_t' type-id='type-id-32' id='type-id-33'/>
- <pointer-type-def type-id='type-id-33' size-in-bits='64' id='type-id-34'/>
- <typedef-decl name='uint_t' type-id='type-id-10' id='type-id-35'/>
- <function-decl name='lzc_change_key' mangled-name='lzc_change_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_change_key'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-8' name='crypt_cmd'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-34' name='wkeydata'/>
- <parameter type-id='type-id-35' name='wkeylen'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='585e1de9' name='compar'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='b59d7dce' name='offset'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_unload_key' mangled-name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_unload_key'>
- <parameter type-id='type-id-4' name='fsname'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
+ <parameter type-id='a3681dea' name='tree1'/>
+ <parameter type-id='a3681dea' name='tree2'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_load_key' mangled-name='lzc_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_load_key'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-23' name='noop'/>
- <parameter type-id='type-id-34' name='wkeydata'/>
- <parameter type-id='type-id-35' name='wkeylen'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
+ <parameter type-id='a3681dea' name='t'/>
+ <parameter type-id='eaa32e2f' name='obj'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='lzc_channel_program_nosync' mangled-name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program_nosync'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-4' name='program'/>
- <parameter type-id='type-id-8' name='timeout'/>
- <parameter type-id='type-id-8' name='memlimit'/>
- <parameter type-id='type-id-15' name='argnvl'/>
- <parameter type-id='type-id-16' name='outnvl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
+ <parameter type-id='a3681dea' name='t'/>
+ <parameter type-id='eaa32e2f' name='obj'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint_discard' mangled-name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint_discard'>
- <parameter type-id='type-id-4' name='pool'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
+ <parameter type-id='a3681dea' name='t'/>
+ <parameter type-id='eaa32e2f' name='obj'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='lzc_pool_checkpoint' mangled-name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint'>
- <parameter type-id='type-id-4' name='pool'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='data'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_channel_program' mangled-name='lzc_channel_program' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-4' name='program'/>
- <parameter type-id='type-id-8' name='timeout'/>
- <parameter type-id='type-id-8' name='memlimit'/>
- <parameter type-id='type-id-15' name='argnvl'/>
- <parameter type-id='type-id-16' name='outnvl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='new_node'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_destroy_bookmarks' mangled-name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_bookmarks'>
- <parameter type-id='type-id-15' name='bmarks'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='new_data'/>
+ <parameter type-id='eaa32e2f' name='here'/>
+ <parameter type-id='95e97e5e' name='direction'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_get_bookmark_props' mangled-name='lzc_get_bookmark_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmark_props'>
- <parameter type-id='type-id-4' name='bookmark'/>
- <parameter type-id='type-id-16' name='props'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='new_data'/>
+ <parameter type-id='fba6cb51' name='where'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_get_bookmarks' mangled-name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmarks'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-16' name='bmarks'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='value'/>
+ <parameter type-id='32adbf30' name='where'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='lzc_bookmark' mangled-name='lzc_bookmark' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_bookmark'>
- <parameter type-id='type-id-15' name='bmarks'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='fba6cb51' name='where'/>
+ <parameter type-id='95e97e5e' name='direction'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='lzc_rollback_to' mangled-name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback_to'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-4' name='snapname'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-36'/>
- <function-decl name='lzc_rollback' mangled-name='lzc_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-36' name='snapnamebuf'/>
- <parameter type-id='type-id-1' name='snapnamelen'/>
- <return type-id='type-id-1'/>
+ <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <class-decl name='dmu_replay_record' size-in-bits='2496' is-struct='yes' visibility='default' id='type-id-37'>
+ <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
+ <parameter type-id='a3681dea' name='tree'/>
+ <parameter type-id='eaa32e2f' name='oldnode'/>
+ <parameter type-id='95e97e5e' name='left'/>
+ <return type-id='eaa32e2f'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='96ee24a5'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <typedef-decl name='boolean_t' type-id='40ed39d2' id='c19b74c3'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <type-decl name='void' id='48b5725f'/>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d2'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='rdwr_efi.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='288' id='16e6f2c6'>
+ <subrange length='36' type-id='7359adad' id='ae666bde'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a65ae39c' size-in-bits='960' id='fa198beb'>
+ <subrange length='1' type-id='7359adad' id='52f813b4'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='384' id='dba89ba3'>
+ <subrange length='12' type-id='7359adad' id='84827bdc'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='3502e3ff' size-in-bits='256' id='01d84ed4'>
+ <subrange length='8' type-id='7359adad' id='56e0c0b1'/>
+ </array-type-def>
+ <class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='dd4a2e5a'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_type' type-id='type-id-38' visibility='default'/>
+ <var-decl name='efi_version' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='drr_payloadlen' type-id='type-id-7' visibility='default'/>
+ <var-decl name='efi_nparts' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_u' type-id='type-id-39' visibility='default'/>
- </data-member>
- </class-decl>
- <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='type-id-38'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='DRR_BEGIN' value='0'/>
- <enumerator name='DRR_OBJECT' value='1'/>
- <enumerator name='DRR_FREEOBJECTS' value='2'/>
- <enumerator name='DRR_WRITE' value='3'/>
- <enumerator name='DRR_FREE' value='4'/>
- <enumerator name='DRR_END' value='5'/>
- <enumerator name='DRR_WRITE_BYREF' value='6'/>
- <enumerator name='DRR_SPILL' value='7'/>
- <enumerator name='DRR_WRITE_EMBEDDED' value='8'/>
- <enumerator name='DRR_OBJECT_RANGE' value='9'/>
- <enumerator name='DRR_REDACT' value='10'/>
- <enumerator name='DRR_NUMTYPES' value='11'/>
- </enum-decl>
- <union-decl name='__anonymous_union__' size-in-bits='2432' is-anonymous='yes' visibility='default' id='type-id-39'>
- <data-member access='private'>
- <var-decl name='drr_begin' type-id='type-id-40' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_end' type-id='type-id-41' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_object' type-id='type-id-42' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_freeobjects' type-id='type-id-43' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_write' type-id='type-id-44' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_free' type-id='type-id-45' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_write_byref' type-id='type-id-46' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_spill' type-id='type-id-47' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_write_embedded' type-id='type-id-48' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_object_range' type-id='type-id-49' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_redact' type-id='type-id-50' visibility='default'/>
- </data-member>
- <data-member access='private'>
- <var-decl name='drr_checksum' type-id='type-id-51' visibility='default'/>
+ <var-decl name='efi_part_size' type-id='3502e3ff' visibility='default'/>
</data-member>
- </union-decl>
- <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-40'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_magic' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_versioninfo' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='efi_lbasize' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_creation_time' type-id='type-id-8' visibility='default'/>
+ <var-decl name='efi_last_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_type' type-id='type-id-52' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='drr_flags' type-id='type-id-7' visibility='default'/>
+ <var-decl name='efi_first_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
+ <var-decl name='efi_last_u_lba' type-id='804dc465' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_fromguid' type-id='type-id-8' visibility='default'/>
+ <var-decl name='efi_disk_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_toname' type-id='type-id-53' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='efi_flags' type-id='3502e3ff' visibility='default'/>
</data-member>
- </class-decl>
- <enum-decl name='dmu_objset_type' id='type-id-54'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='DMU_OST_NONE' value='0'/>
- <enumerator name='DMU_OST_META' value='1'/>
- <enumerator name='DMU_OST_ZFS' value='2'/>
- <enumerator name='DMU_OST_ZVOL' value='3'/>
- <enumerator name='DMU_OST_OTHER' value='4'/>
- <enumerator name='DMU_OST_ANY' value='5'/>
- <enumerator name='DMU_OST_NUMTYPES' value='6'/>
- </enum-decl>
- <typedef-decl name='dmu_objset_type_t' type-id='type-id-54' id='type-id-52'/>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='2048' id='type-id-53'>
- <subrange length='256' type-id='type-id-12' id='type-id-55'/>
-
- </array-type-def>
- <class-decl name='drr_end' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-41'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_checksum' type-id='type-id-56' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='efi_reserved1' type-id='3502e3ff' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='efi_altern_lba' type-id='804dc465' visibility='default'/>
</data-member>
- </class-decl>
- <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-57'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_word' type-id='type-id-58' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='efi_reserved' type-id='dba89ba3' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='efi_parts' type-id='fa198beb' visibility='default'/>
</data-member>
</class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='256' id='type-id-58'>
- <subrange length='4' type-id='type-id-12' id='type-id-59'/>
-
- </array-type-def>
- <typedef-decl name='zio_cksum_t' type-id='type-id-57' id='type-id-56'/>
- <class-decl name='drr_object' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-42'>
+ <typedef-decl name='diskaddr_t' type-id='9b3ff54f' id='804dc465'/>
+ <typedef-decl name='longlong_t' type-id='1eb56b1e' id='9b3ff54f'/>
+ <class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='214f32ea'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_type' type-id='type-id-60' visibility='default'/>
+ <var-decl name='time_low' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='drr_bonustype' type-id='type-id-60' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='time_mid' type-id='149c6638' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_blksz' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='48'>
+ <var-decl name='time_hi_and_version' type-id='149c6638' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='drr_bonuslen' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='clock_seq_hi_and_reserved' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_checksumtype' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='72'>
+ <var-decl name='clock_seq_low' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='drr_compress' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='80'>
+ <var-decl name='node_addr' type-id='0f562bd0' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='208'>
- <var-decl name='drr_dn_slots' type-id='type-id-33' visibility='default'/>
+ </class-decl>
+ <class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='a65ae39c'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='p_start' type-id='804dc465' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='216'>
- <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='p_size' type-id='804dc465' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='drr_raw_bonuslen' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='p_guid' type-id='214f32ea' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
+ <var-decl name='p_tag' type-id='d908a348' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_indblkshift' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='328'>
- <var-decl name='drr_nlevels' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='272'>
+ <var-decl name='p_flag' type-id='d908a348' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='336'>
- <var-decl name='drr_nblkptr' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='p_name' type-id='16e6f2c6' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='344'>
- <var-decl name='drr_pad' type-id='type-id-61' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='p_uguid' type-id='214f32ea' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_maxblkid' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='p_resv' type-id='01d84ed4' visibility='default'/>
</data-member>
</class-decl>
- <enum-decl name='dmu_object_type' id='type-id-62'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='DMU_OT_NONE' value='0'/>
- <enumerator name='DMU_OT_OBJECT_DIRECTORY' value='1'/>
- <enumerator name='DMU_OT_OBJECT_ARRAY' value='2'/>
- <enumerator name='DMU_OT_PACKED_NVLIST' value='3'/>
- <enumerator name='DMU_OT_PACKED_NVLIST_SIZE' value='4'/>
- <enumerator name='DMU_OT_BPOBJ' value='5'/>
- <enumerator name='DMU_OT_BPOBJ_HDR' value='6'/>
- <enumerator name='DMU_OT_SPACE_MAP_HEADER' value='7'/>
- <enumerator name='DMU_OT_SPACE_MAP' value='8'/>
- <enumerator name='DMU_OT_INTENT_LOG' value='9'/>
- <enumerator name='DMU_OT_DNODE' value='10'/>
- <enumerator name='DMU_OT_OBJSET' value='11'/>
- <enumerator name='DMU_OT_DSL_DIR' value='12'/>
- <enumerator name='DMU_OT_DSL_DIR_CHILD_MAP' value='13'/>
- <enumerator name='DMU_OT_DSL_DS_SNAP_MAP' value='14'/>
- <enumerator name='DMU_OT_DSL_PROPS' value='15'/>
- <enumerator name='DMU_OT_DSL_DATASET' value='16'/>
- <enumerator name='DMU_OT_ZNODE' value='17'/>
- <enumerator name='DMU_OT_OLDACL' value='18'/>
- <enumerator name='DMU_OT_PLAIN_FILE_CONTENTS' value='19'/>
- <enumerator name='DMU_OT_DIRECTORY_CONTENTS' value='20'/>
- <enumerator name='DMU_OT_MASTER_NODE' value='21'/>
- <enumerator name='DMU_OT_UNLINKED_SET' value='22'/>
- <enumerator name='DMU_OT_ZVOL' value='23'/>
- <enumerator name='DMU_OT_ZVOL_PROP' value='24'/>
- <enumerator name='DMU_OT_PLAIN_OTHER' value='25'/>
- <enumerator name='DMU_OT_UINT64_OTHER' value='26'/>
- <enumerator name='DMU_OT_ZAP_OTHER' value='27'/>
- <enumerator name='DMU_OT_ERROR_LOG' value='28'/>
- <enumerator name='DMU_OT_SPA_HISTORY' value='29'/>
- <enumerator name='DMU_OT_SPA_HISTORY_OFFSETS' value='30'/>
- <enumerator name='DMU_OT_POOL_PROPS' value='31'/>
- <enumerator name='DMU_OT_DSL_PERMS' value='32'/>
- <enumerator name='DMU_OT_ACL' value='33'/>
- <enumerator name='DMU_OT_SYSACL' value='34'/>
- <enumerator name='DMU_OT_FUID' value='35'/>
- <enumerator name='DMU_OT_FUID_SIZE' value='36'/>
- <enumerator name='DMU_OT_NEXT_CLONES' value='37'/>
- <enumerator name='DMU_OT_SCAN_QUEUE' value='38'/>
- <enumerator name='DMU_OT_USERGROUP_USED' value='39'/>
- <enumerator name='DMU_OT_USERGROUP_QUOTA' value='40'/>
- <enumerator name='DMU_OT_USERREFS' value='41'/>
- <enumerator name='DMU_OT_DDT_ZAP' value='42'/>
- <enumerator name='DMU_OT_DDT_STATS' value='43'/>
- <enumerator name='DMU_OT_SA' value='44'/>
- <enumerator name='DMU_OT_SA_MASTER_NODE' value='45'/>
- <enumerator name='DMU_OT_SA_ATTR_REGISTRATION' value='46'/>
- <enumerator name='DMU_OT_SA_ATTR_LAYOUTS' value='47'/>
- <enumerator name='DMU_OT_SCAN_XLATE' value='48'/>
- <enumerator name='DMU_OT_DEDUP' value='49'/>
- <enumerator name='DMU_OT_DEADLIST' value='50'/>
- <enumerator name='DMU_OT_DEADLIST_HDR' value='51'/>
- <enumerator name='DMU_OT_DSL_CLONES' value='52'/>
- <enumerator name='DMU_OT_BPOBJ_SUBOBJ' value='53'/>
- <enumerator name='DMU_OT_NUMTYPES' value='54'/>
- <enumerator name='DMU_OTN_UINT8_DATA' value='128'/>
- <enumerator name='DMU_OTN_UINT8_METADATA' value='192'/>
- <enumerator name='DMU_OTN_UINT16_DATA' value='129'/>
- <enumerator name='DMU_OTN_UINT16_METADATA' value='193'/>
- <enumerator name='DMU_OTN_UINT32_DATA' value='130'/>
- <enumerator name='DMU_OTN_UINT32_METADATA' value='194'/>
- <enumerator name='DMU_OTN_UINT64_DATA' value='131'/>
- <enumerator name='DMU_OTN_UINT64_METADATA' value='195'/>
- <enumerator name='DMU_OTN_ZAP_DATA' value='132'/>
- <enumerator name='DMU_OTN_ZAP_METADATA' value='196'/>
- <enumerator name='DMU_OTN_UINT8_ENC_DATA' value='160'/>
- <enumerator name='DMU_OTN_UINT8_ENC_METADATA' value='224'/>
- <enumerator name='DMU_OTN_UINT16_ENC_DATA' value='161'/>
- <enumerator name='DMU_OTN_UINT16_ENC_METADATA' value='225'/>
- <enumerator name='DMU_OTN_UINT32_ENC_DATA' value='162'/>
- <enumerator name='DMU_OTN_UINT32_ENC_METADATA' value='226'/>
- <enumerator name='DMU_OTN_UINT64_ENC_DATA' value='163'/>
- <enumerator name='DMU_OTN_UINT64_ENC_METADATA' value='227'/>
- <enumerator name='DMU_OTN_ZAP_ENC_DATA' value='164'/>
- <enumerator name='DMU_OTN_ZAP_ENC_METADATA' value='228'/>
- </enum-decl>
- <typedef-decl name='dmu_object_type_t' type-id='type-id-62' id='type-id-60'/>
-
- <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='40' id='type-id-61'>
- <subrange length='5' type-id='type-id-12' id='type-id-63'/>
-
- </array-type-def>
- <class-decl name='drr_freeobjects' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-43'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_firstobj' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_numobjs' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_write' size-in-bits='1088' is-struct='yes' visibility='default' id='type-id-44'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_type' type-id='type-id-60' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='drr_pad' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_logical_size' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_checksumtype' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='328'>
- <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='336'>
- <var-decl name='drr_compressiontype' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='344'>
- <var-decl name='drr_pad2' type-id='type-id-61' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_key' type-id='type-id-64' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='drr_compressed_size' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='drr_salt' type-id='type-id-65' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='drr_iv' type-id='type-id-66' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='drr_mac' type-id='type-id-67' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='ddt_key' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-68'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ddk_cksum' type-id='type-id-56' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='ddk_prop' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='ddt_key_t' type-id='type-id-68' id='type-id-64'/>
-
- <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='64' id='type-id-65'>
- <subrange length='8' type-id='type-id-12' id='type-id-69'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='96' id='type-id-66'>
- <subrange length='12' type-id='type-id-12' id='type-id-70'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='128' id='type-id-67'>
- <subrange length='16' type-id='type-id-12' id='type-id-71'/>
-
- </array-type-def>
- <class-decl name='drr_free' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-45'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_write_byref' size-in-bits='832' is-struct='yes' visibility='default' id='type-id-46'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_refguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_refobject' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_refoffset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='drr_checksumtype' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='456'>
- <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='464'>
- <var-decl name='drr_pad2' type-id='type-id-72' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='drr_key' type-id='type-id-64' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='48' id='type-id-72'>
- <subrange length='6' type-id='type-id-12' id='type-id-73'/>
-
- </array-type-def>
- <class-decl name='drr_spill' size-in-bits='640' is-struct='yes' visibility='default' id='type-id-47'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='200'>
- <var-decl name='drr_compressiontype' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='208'>
- <var-decl name='drr_pad' type-id='type-id-72' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_compressed_size' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_salt' type-id='type-id-65' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='drr_iv' type-id='type-id-66' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='drr_mac' type-id='type-id-67' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='608'>
- <var-decl name='drr_type' type-id='type-id-60' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_write_embedded' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-48'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_compression' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='264'>
- <var-decl name='drr_etype' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='272'>
- <var-decl name='drr_pad' type-id='type-id-72' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='drr_lsize' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='drr_psize' type-id='type-id-7' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_object_range' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-49'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_firstobj' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_numslots' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_salt' type-id='type-id-65' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='drr_iv' type-id='type-id-66' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='drr_mac' type-id='type-id-67' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='drr_flags' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='488'>
- <var-decl name='drr_pad' type-id='type-id-74' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-33' size-in-bits='24' id='type-id-74'>
- <subrange length='3' type-id='type-id-12' id='type-id-75'/>
-
- </array-type-def>
- <class-decl name='drr_redact' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-50'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_object' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='drr_offset' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='drr_length' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='drr_toguid' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='drr_checksum' size-in-bits='2432' is-struct='yes' visibility='default' id='type-id-51'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='drr_pad' type-id='type-id-76' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='2176'>
- <var-decl name='drr_checksum' type-id='type-id-56' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='2176' id='type-id-76'>
- <subrange length='34' type-id='type-id-12' id='type-id-77'/>
-
- </array-type-def>
- <typedef-decl name='dmu_replay_record_t' type-id='type-id-37' id='type-id-78'/>
- <qualified-type-def type-id='type-id-78' const='yes' id='type-id-79'/>
- <pointer-type-def type-id='type-id-79' size-in-bits='64' id='type-id-80'/>
- <pointer-type-def type-id='type-id-8' size-in-bits='64' id='type-id-81'/>
- <function-decl name='lzc_receive_with_cmdprops' mangled-name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_cmdprops'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-15' name='cmdprops'/>
- <parameter type-id='type-id-34' name='wkeydata'/>
- <parameter type-id='type-id-35' name='wkeylen'/>
- <parameter type-id='type-id-4' name='origin'/>
- <parameter type-id='type-id-23' name='force'/>
- <parameter type-id='type-id-23' name='resumable'/>
- <parameter type-id='type-id-23' name='raw'/>
- <parameter type-id='type-id-1' name='input_fd'/>
- <parameter type-id='type-id-80' name='begin_record'/>
- <parameter type-id='type-id-1' name='cleanup_fd'/>
- <parameter type-id='type-id-81' name='read_bytes'/>
- <parameter type-id='type-id-81' name='errflags'/>
- <parameter type-id='type-id-81' name='action_handle'/>
- <parameter type-id='type-id-16' name='errors'/>
- <return type-id='type-id-1'/>
+ <typedef-decl name='ushort_t' type-id='8efea9e5' id='d908a348'/>
+ <pointer-type-def type-id='dd4a2e5a' size-in-bits='64' id='0d8119a8'/>
+ <pointer-type-def type-id='0d8119a8' size-in-bits='64' id='c43b27a6'/>
+ <var-decl name='efi_debug' type-id='95e97e5e' mangled-name='efi_debug' visibility='default' elf-symbol-id='efi_debug'/>
+ <function-decl name='efi_err_check' mangled-name='efi_err_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_err_check'>
+ <parameter type-id='0d8119a8' name='vtoc'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_receive_one' mangled-name='lzc_receive_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_one'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-4' name='origin'/>
- <parameter type-id='type-id-23' name='force'/>
- <parameter type-id='type-id-23' name='resumable'/>
- <parameter type-id='type-id-23' name='raw'/>
- <parameter type-id='type-id-1' name='input_fd'/>
- <parameter type-id='type-id-80' name='begin_record'/>
- <parameter type-id='type-id-1' name='cleanup_fd'/>
- <parameter type-id='type-id-81' name='read_bytes'/>
- <parameter type-id='type-id-81' name='errflags'/>
- <parameter type-id='type-id-81' name='action_handle'/>
- <parameter type-id='type-id-16' name='errors'/>
- <return type-id='type-id-1'/>
+ <function-decl name='efi_type' mangled-name='efi_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_type'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_receive_with_header' mangled-name='lzc_receive_with_header' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_header'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-4' name='origin'/>
- <parameter type-id='type-id-23' name='force'/>
- <parameter type-id='type-id-23' name='resumable'/>
- <parameter type-id='type-id-23' name='raw'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-80' name='begin_record'/>
- <return type-id='type-id-1'/>
+ <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_free'>
+ <parameter type-id='0d8119a8' name='ptr'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_receive_resumable' mangled-name='lzc_receive_resumable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_resumable'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-4' name='origin'/>
- <parameter type-id='type-id-23' name='force'/>
- <parameter type-id='type-id-23' name='raw'/>
- <parameter type-id='type-id-1' name='fd'/>
- <return type-id='type-id-1'/>
+ <function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_write'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='0d8119a8' name='vtoc'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_receive' mangled-name='lzc_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-4' name='origin'/>
- <parameter type-id='type-id-23' name='force'/>
- <parameter type-id='type-id-23' name='raw'/>
- <parameter type-id='type-id-1' name='fd'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <enum-decl name='lzc_send_flags' id='type-id-82'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='LZC_SEND_FLAG_EMBED_DATA' value='1'/>
- <enumerator name='LZC_SEND_FLAG_LARGE_BLOCK' value='2'/>
- <enumerator name='LZC_SEND_FLAG_COMPRESS' value='4'/>
- <enumerator name='LZC_SEND_FLAG_RAW' value='8'/>
- <enumerator name='LZC_SEND_FLAG_SAVED' value='16'/>
- </enum-decl>
- <function-decl name='lzc_send_space' mangled-name='lzc_send_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-4' name='from'/>
- <parameter type-id='type-id-82' name='flags'/>
- <parameter type-id='type-id-81' name='spacep'/>
- <return type-id='type-id-1'/>
+ <function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_use_whole_disk'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_send_space_resume_redacted' mangled-name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space_resume_redacted'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-4' name='from'/>
- <parameter type-id='type-id-82' name='flags'/>
- <parameter type-id='type-id-8' name='resumeobj'/>
- <parameter type-id='type-id-8' name='resumeoff'/>
- <parameter type-id='type-id-8' name='resume_bytes'/>
- <parameter type-id='type-id-4' name='redactbook'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-81' name='spacep'/>
- <return type-id='type-id-1'/>
+ <function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_rescan'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_send_resume_redacted' mangled-name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume_redacted'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-4' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-82' name='flags'/>
- <parameter type-id='type-id-8' name='resumeobj'/>
- <parameter type-id='type-id-8' name='resumeoff'/>
- <parameter type-id='type-id-4' name='redactbook'/>
- <return type-id='type-id-1'/>
+ <function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_read'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='c43b27a6' name='vtoc'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_send_resume' mangled-name='lzc_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-4' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-82' name='flags'/>
- <parameter type-id='type-id-8' name='resumeobj'/>
- <parameter type-id='type-id-8' name='resumeoff'/>
- <return type-id='type-id-1'/>
+ <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_init'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='8f92235e' name='nparts'/>
+ <parameter type-id='c43b27a6' name='vtoc'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
+ <typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
+ <typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='48' id='0f562bd0'>
+ <subrange length='6' type-id='7359adad' id='52fa524b'/>
+ </array-type-def>
+ <type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
+ <typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='assert.c' language='LANG_C99'>
+ <type-decl name='variadic parameter type' id='2c1145c5'/>
+ <var-decl name='libspl_assert_ok' type-id='95e97e5e' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
+ <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
+ <parameter type-id='80f4b756' name='file'/>
+ <parameter type-id='80f4b756' name='func'/>
+ <parameter type-id='95e97e5e' name='line'/>
+ <parameter type-id='80f4b756' name='format'/>
+ <parameter is-variadic='yes'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_send_redacted' mangled-name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_redacted'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-4' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-82' name='flags'/>
- <parameter type-id='type-id-4' name='redactbook'/>
- <return type-id='type-id-1'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='atomic.c' language='LANG_C99'>
+ <type-decl name='signed char' size-in-bits='8' id='28577a57'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
+ <typedef-decl name='uint16_t' type-id='253c2d2a' id='149c6638'/>
+ <typedef-decl name='__uint16_t' type-id='8efea9e5' id='253c2d2a'/>
+ <typedef-decl name='int16_t' type-id='03896e23' id='23bd8cb5'/>
+ <typedef-decl name='__int16_t' type-id='a2185560' id='03896e23'/>
+ <typedef-decl name='int8_t' type-id='2171a512' id='ee31ee44'/>
+ <typedef-decl name='__int8_t' type-id='28577a57' id='2171a512'/>
+ <qualified-type-def type-id='149c6638' volatile='yes' id='5120c5f7'/>
+ <pointer-type-def type-id='5120c5f7' size-in-bits='64' id='93977ae7'/>
+ <qualified-type-def type-id='8f92235e' volatile='yes' id='430e0681'/>
+ <pointer-type-def type-id='430e0681' size-in-bits='64' id='3a147f31'/>
+ <qualified-type-def type-id='b96825af' volatile='yes' id='84ff7d66'/>
+ <pointer-type-def type-id='84ff7d66' size-in-bits='64' id='aa323ea4'/>
+ <qualified-type-def type-id='ee1f298e' volatile='yes' id='6f7e09cb'/>
+ <pointer-type-def type-id='6f7e09cb' size-in-bits='64' id='64698d33'/>
+ <qualified-type-def type-id='48b5725f' volatile='yes' id='b0b3cbf9'/>
+ <pointer-type-def type-id='b0b3cbf9' size-in-bits='64' id='fe09dd29'/>
+ <function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_send' mangled-name='lzc_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send'>
- <parameter type-id='type-id-4' name='snapname'/>
- <parameter type-id='type-id-4' name='from'/>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-82' name='flags'/>
- <return type-id='type-id-1'/>
+ <function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_get_holds' mangled-name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_holds'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-16' name='outnvl'/>
- <return type-id='type-id-1'/>
+ <function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='lzc_release' mangled-name='lzc_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_release'>
- <parameter type-id='type-id-15' name='holds'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='3502e3ff' name='value'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_hold' mangled-name='lzc_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_hold'>
- <parameter type-id='type-id-15' name='holds'/>
- <parameter type-id='type-id-1' name='cleanup_fd'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='3502e3ff' name='value'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='lzc_sync' mangled-name='lzc_sync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_sync'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-16' name='bmarks'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='eaa32e2f' name='bits'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='lzc_exists' mangled-name='lzc_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_exists'>
- <parameter type-id='type-id-4' name='dataset'/>
- <return type-id='type-id-23'/>
+ <function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <function-decl name='lzc_snaprange_space' mangled-name='lzc_snaprange_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snaprange_space'>
- <parameter type-id='type-id-4' name='firstsnap'/>
- <parameter type-id='type-id-4' name='lastsnap'/>
- <parameter type-id='type-id-81' name='usedp'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='lzc_destroy_snaps' mangled-name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_snaps'>
- <parameter type-id='type-id-15' name='snaps'/>
- <parameter type-id='type-id-23' name='defer'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
- <function-decl name='lzc_snapshot' mangled-name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snapshot'>
- <parameter type-id='type-id-15' name='snaps'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-16' name='errlist'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='lzc_destroy' mangled-name='lzc_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy'>
- <parameter type-id='type-id-4' name='fsname'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='eaa32e2f' name='exp'/>
+ <parameter type-id='eaa32e2f' name='des'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='lzc_rename' mangled-name='lzc_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rename'>
- <parameter type-id='type-id-4' name='source'/>
- <parameter type-id='type-id-4' name='target'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <function-decl name='lzc_promote' mangled-name='lzc_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_promote'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-36' name='snapnamebuf'/>
- <parameter type-id='type-id-1' name='snapnamelen'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='lzc_clone' mangled-name='lzc_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_clone'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-4' name='origin'/>
- <parameter type-id='type-id-15' name='props'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
- <enum-decl name='lzc_dataset_type' id='type-id-83'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='LZC_DATSET_TYPE_ZFS' value='2'/>
- <enumerator name='LZC_DATSET_TYPE_ZVOL' value='3'/>
- </enum-decl>
- <function-decl name='lzc_create' mangled-name='lzc_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_create'>
- <parameter type-id='type-id-4' name='fsname'/>
- <parameter type-id='type-id-83' name='type'/>
- <parameter type-id='type-id-15' name='props'/>
- <parameter type-id='type-id-34' name='wkeydata'/>
- <parameter type-id='type-id-35' name='wkeylen'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='void' id='type-id-84'/>
- <function-decl name='libzfs_core_fini' mangled-name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_fini'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='libzfs_core_init' mangled-name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_init'>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='fnvlist_add_int32' mangled-name='fnvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
- <function-decl name='fnvlist_lookup_boolean_value' mangled-name='fnvlist_lookup_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
- <function-decl name='fnvlist_add_nvlist' mangled-name='fnvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean_value' mangled-name='fnvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <function-decl name='fnvlist_add_uint8_array' mangled-name='fnvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='fnvlist_add_boolean' mangled-name='fnvlist_add_boolean' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='149c6638'/>
</function-decl>
- <function-decl name='nvlist_next_nvpair' mangled-name='nvlist_next_nvpair' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='nvpair_name' mangled-name='nvpair_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='strcspn' mangled-name='strcspn' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='149c6638'/>
</function-decl>
- <function-decl name='fnvlist_lookup_string' mangled-name='fnvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='ee1f298e'/>
</function-decl>
- <function-decl name='strrchr' mangled-name='strrchr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='149c6638'/>
</function-decl>
- <function-decl name='fnvlist_dup' mangled-name='fnvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='fnvlist_add_byte_array' mangled-name='fnvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_unpack' mangled-name='nvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='__builtin_memset' mangled-name='memset' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fnvlist_pack' mangled-name='fnvlist_pack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='malloc' mangled-name='malloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_ioctl_fd' mangled-name='zfs_ioctl_fd' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='__errno_location' mangled-name='__errno_location' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fnvlist_pack_free' mangled-name='fnvlist_pack_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='__read_alias' mangled-name='read' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strchr' mangled-name='strchr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fnvlist_lookup_uint64' mangled-name='fnvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fnvlist_unpack' mangled-name='fnvlist_unpack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='pthread_mutex_lock' mangled-name='pthread_mutex_lock' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
+ <parameter type-id='fe09dd29' name='target'/>
+ <parameter type-id='79a0948f' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='close' mangled-name='close' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='bd54fe1a' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='pthread_mutex_unlock' mangled-name='pthread_mutex_unlock' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='3ff5601b' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='__open_alias' mangled-name='open64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='23bd8cb5' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='zutil_device_path.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strcmp_pathname'>
- <parameter type-id='type-id-4' name='name'/>
- <parameter type-id='type-id-4' name='cmp'/>
- <parameter type-id='type-id-1' name='wholedisk'/>
- <return type-id='type-id-1'/>
+ <function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='ee31ee44' name='bits'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <typedef-decl name='size_t' type-id='type-id-12' id='type-id-85'/>
- <function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_resolve_shortname'>
- <parameter type-id='type-id-4' name='name'/>
- <parameter type-id='type-id-36' name='path'/>
- <parameter type-id='type-id-85' name='len'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='long int' size-in-bits='64' id='type-id-86'/>
- <typedef-decl name='__ssize_t' type-id='type-id-86' id='type-id-87'/>
- <typedef-decl name='ssize_t' type-id='type-id-87' id='type-id-88'/>
- <function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dirnamelen'>
- <parameter type-id='type-id-4' name='path'/>
- <return type-id='type-id-88'/>
+ <function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_basename'>
- <parameter type-id='type-id-4' name='path'/>
- <return type-id='type-id-4'/>
+ <function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='__builtin___snprintf_chk' mangled-name='__snprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='getenv' mangled-name='getenv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
+ <parameter type-id='64698d33' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='strtok_r' mangled-name='strtok_r' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='strlen' mangled-name='strlen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
+ <parameter type-id='aa323ea4' name='target'/>
+ <parameter type-id='b96825af' name='exp'/>
+ <parameter type-id='b96825af' name='des'/>
+ <return type-id='b96825af'/>
</function-decl>
- <function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
+ <parameter type-id='93977ae7' name='target'/>
+ <parameter type-id='149c6638' name='exp'/>
+ <parameter type-id='149c6638' name='des'/>
+ <return type-id='149c6638'/>
</function-decl>
- <function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
+ <parameter type-id='3a147f31' name='target'/>
+ <parameter type-id='8f92235e' name='exp'/>
+ <parameter type-id='8f92235e' name='des'/>
+ <return type-id='8f92235e'/>
</function-decl>
- <function-decl name='access' mangled-name='access' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
+ <parameter type-id='64698d33' name='target'/>
+ <parameter type-id='ee1f298e' name='exp'/>
+ <parameter type-id='ee1f298e' name='des'/>
+ <return type-id='ee1f298e'/>
+ </function-decl>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <type-decl name='short int' size-in-bits='16' id='a2185560'/>
+ <typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
+ <typedef-decl name='ssize_t' type-id='41060289' id='79a0948f'/>
+ <typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
+ <typedef-decl name='__ssize_t' type-id='bd54fe1a' id='41060289'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='getexecname.c' language='LANG_C99'>
+ <function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
+ <return type-id='80f4b756'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='zutil_import.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
- <pointer-type-def type-id='type-id-84' size-in-bits='64' id='type-id-89'/>
- <class-decl name='importargs' size-in-bits='448' is-struct='yes' visibility='default' id='type-id-90'>
+ <abi-instr version='1.0' address-size='64' path='list.c' language='LANG_C99'>
+ <typedef-decl name='list_t' type-id='e824dae9' id='0899125f'/>
+ <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='e824dae9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='path' type-id='type-id-91' visibility='default'/>
+ <var-decl name='list_size' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='paths' type-id='type-id-1' visibility='default'/>
+ <var-decl name='list_offset' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='poolname' type-id='type-id-4' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='guid' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='cachefile' type-id='type-id-4' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='can_be_active' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='scan' type-id='type-id-23' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='policy' type-id='type-id-15' visibility='default'/>
+ <var-decl name='list_head' type-id='b0b5e45e' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-36' size-in-bits='64' id='type-id-91'/>
- <typedef-decl name='importargs_t' type-id='type-id-90' id='type-id-92'/>
- <pointer-type-def type-id='type-id-92' size-in-bits='64' id='type-id-93'/>
- <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-94'>
+ <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='b0b5e45e'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='pco_refresh_config' type-id='type-id-95' visibility='default'/>
+ <var-decl name='next' type-id='b03eadb4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='pco_pool_active' type-id='type-id-96' visibility='default'/>
+ <var-decl name='prev' type-id='b03eadb4' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='refresh_config_func_t' type-id='type-id-97' id='type-id-98'/>
- <pointer-type-def type-id='type-id-98' size-in-bits='64' id='type-id-95'/>
- <typedef-decl name='pool_active_func_t' type-id='type-id-99' id='type-id-100'/>
- <pointer-type-def type-id='type-id-100' size-in-bits='64' id='type-id-96'/>
- <qualified-type-def type-id='type-id-94' const='yes' id='type-id-101'/>
- <typedef-decl name='pool_config_ops_t' type-id='type-id-101' id='type-id-102'/>
- <pointer-type-def type-id='type-id-102' size-in-bits='64' id='type-id-103'/>
- <function-decl name='zpool_find_config' mangled-name='zpool_find_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_config'>
- <parameter type-id='type-id-89' name='hdl'/>
- <parameter type-id='type-id-4' name='target'/>
- <parameter type-id='type-id-16' name='configp'/>
- <parameter type-id='type-id-93' name='args'/>
- <parameter type-id='type-id-103' name='pco'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zpool_search_import' mangled-name='zpool_search_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_search_import'>
- <parameter type-id='type-id-89' name='hdl'/>
- <parameter type-id='type-id-93' name='import'/>
- <parameter type-id='type-id-103' name='pco'/>
- <return type-id='type-id-15'/>
- </function-decl>
- <pointer-type-def type-id='type-id-1' size-in-bits='64' id='type-id-104'/>
- <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_read_label'>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-16' name='config'/>
- <parameter type-id='type-id-104' name='num_labels'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvpair_value_nvlist' mangled-name='nvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strtoull' mangled-name='strtoull' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strpbrk' mangled-name='strpbrk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__fxstat64' mangled-name='__fxstat64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='calloc' mangled-name='calloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='fnvlist_lookup_nvlist' mangled-name='fnvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_mutex_init' mangled-name='pthread_mutex_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_mutex_destroy' mangled-name='pthread_mutex_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='fnvpair_value_nvlist' mangled-name='fnvpair_value_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='dcgettext' mangled-name='dcgettext' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strerror' mangled-name='strerror' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__realpath_chk' mangled-name='__realpath_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strndup' mangled-name='strndup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__xstat' mangled-name='__xstat64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_find_import_blkid' mangled-name='zpool_find_import_blkid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='nvlist_empty' mangled-name='nvlist_empty' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='geteuid' mangled-name='geteuid' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist_array' mangled-name='nvlist_lookup_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='opendir' mangled-name='opendir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='readdir64' mangled-name='readdir64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='closedir' mangled-name='closedir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__asprintf_chk' mangled-name='__asprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='ioctl' mangled-name='ioctl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__pread64_alias' mangled-name='pread64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='posix_memalign' mangled-name='posix_memalign' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='aio_error' mangled-name='aio_error64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='aio_return' mangled-name='aio_return64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='lio_listio' mangled-name='lio_listio64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='exit' mangled-name='exit' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__builtin___vsnprintf_chk' mangled-name='__vsnprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <typedef-decl name='list_node_t' type-id='b0b5e45e' id='b21843b2'/>
+ <pointer-type-def type-id='b0b5e45e' size-in-bits='64' id='b03eadb4'/>
+ <pointer-type-def type-id='b21843b2' size-in-bits='64' id='ccc38265'/>
+ <pointer-type-def type-id='0899125f' size-in-bits='64' id='352ec160'/>
+ <function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
+ <parameter type-id='ccc38265' name='ln'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
+ <parameter type-id='ccc38265' name='ln'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='strncmp' mangled-name='strncmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
+ <parameter type-id='ccc38265' name='lold'/>
+ <parameter type-id='ccc38265' name='lnew'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
+ <parameter type-id='352ec160' name='dst'/>
+ <parameter type-id='352ec160' name='src'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='nvlist_remove' mangled-name='nvlist_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='nvlist_lookup_uint64_array' mangled-name='nvlist_lookup_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='sysconf' mangled-name='sysconf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='eaa32e2f'/>
</function-decl>
- <function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <parameter type-id='eaa32e2f' name='nobject'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='eaa32e2f' name='object'/>
+ <parameter type-id='eaa32e2f' name='nobject'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
+ <parameter type-id='352ec160' name='list'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
+ <parameter type-id='352ec160' name='list'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <parameter type-id='b59d7dce' name='offset'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-type size-in-bits='64' id='type-id-99'>
- <parameter type-id='type-id-89'/>
- <parameter type-id='type-id-4'/>
- <parameter type-id='type-id-8'/>
- <parameter type-id='type-id-24'/>
- <return type-id='type-id-1'/>
- </function-type>
- <function-type size-in-bits='64' id='type-id-97'>
- <parameter type-id='type-id-89'/>
- <parameter type-id='type-id-15'/>
- <return type-id='type-id-15'/>
- </function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='zutil_nicenum.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicebytes'>
- <parameter type-id='type-id-8' name='num'/>
- <parameter type-id='type-id-36' name='buf'/>
- <parameter type-id='type-id-85' name='buflen'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_niceraw' mangled-name='zfs_niceraw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_niceraw'>
- <parameter type-id='type-id-8' name='num'/>
- <parameter type-id='type-id-36' name='buf'/>
- <parameter type-id='type-id-85' name='buflen'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_nicetime' mangled-name='zfs_nicetime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicetime'>
- <parameter type-id='type-id-8' name='num'/>
- <parameter type-id='type-id-36' name='buf'/>
- <parameter type-id='type-id-85' name='buflen'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum'>
- <parameter type-id='type-id-8' name='num'/>
- <parameter type-id='type-id-36' name='buf'/>
- <parameter type-id='type-id-85' name='buflen'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <enum-decl name='zfs_nicenum_format' id='type-id-105'>
- <underlying-type type-id='type-id-19'/>
- <enumerator name='ZFS_NICENUM_1024' value='0'/>
- <enumerator name='ZFS_NICENUM_BYTES' value='1'/>
- <enumerator name='ZFS_NICENUM_TIME' value='2'/>
- <enumerator name='ZFS_NICENUM_RAW' value='3'/>
- <enumerator name='ZFS_NICENUM_RAWTIME' value='4'/>
- </enum-decl>
- <function-decl name='zfs_nicenum_format' mangled-name='zfs_nicenum_format' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum_format'>
- <parameter type-id='type-id-8' name='num'/>
- <parameter type-id='type-id-36' name='buf'/>
- <parameter type-id='type-id-85' name='buflen'/>
- <parameter type-id='type-id-105' name='format'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_isnumber' mangled-name='zfs_isnumber' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_isnumber'>
- <parameter type-id='type-id-4' name='str'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='powl' mangled-name='powl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__builtin_snprintf' mangled-name='snprintf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <abi-instr version='1.0' address-size='64' path='mkdirp.c' language='LANG_C99'>
+ <typedef-decl name='mode_t' type-id='e1c52942' id='d50d396c'/>
+ <function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
+ <parameter type-id='80f4b756' name='d'/>
+ <parameter type-id='d50d396c' name='mode'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='__ctype_b_loc' mangled-name='__ctype_b_loc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <typedef-decl name='__mode_t' type-id='f0981eeb' id='e1c52942'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' language='LANG_C99'>
+ <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
+ <return type-id='7359adad'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='zutil_pool.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
- <pointer-type-def type-id='type-id-16' size-in-bits='64' id='type-id-106'/>
- <pointer-type-def type-id='type-id-35' size-in-bits='64' id='type-id-107'/>
- <function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_history_unpack'>
- <parameter type-id='type-id-36' name='buf'/>
- <parameter type-id='type-id-8' name='bytes_read'/>
- <parameter type-id='type-id-81' name='leftover'/>
- <parameter type-id='type-id-106' name='records'/>
- <parameter type-id='type-id-107' name='numrecords'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='ddt_stat' size-in-bits='512' is-struct='yes' visibility='default' id='type-id-108'>
+ <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='03085adc' size-in-bits='192' id='083f8d58'>
+ <subrange length='3' type-id='7359adad' id='56f209d2'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
+ <subrange length='1' type-id='7359adad' id='52f813b4'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
+ <subrange length='20' type-id='7359adad' id='fdca39cf'/>
+ </array-type-def>
+ <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='0c544dc0'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='dds_blocks' type-id='type-id-8' visibility='default'/>
+ <var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='dds_lsize' type-id='type-id-8' visibility='default'/>
+ <var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='dds_psize' type-id='type-id-8' visibility='default'/>
+ <var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='dds_dsize' type-id='type-id-8' visibility='default'/>
+ <var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='dds_ref_blocks' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='dds_ref_lsize' type-id='type-id-8' visibility='default'/>
+ <var-decl name='mnt_major' type-id='3502e3ff' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='dds_ref_psize' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='dds_ref_dsize' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='mnt_minor' type-id='3502e3ff' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='ddt_stat_t' type-id='type-id-108' id='type-id-109'/>
- <qualified-type-def type-id='type-id-109' const='yes' id='type-id-110'/>
- <pointer-type-def type-id='type-id-110' size-in-bits='64' id='type-id-111'/>
- <class-decl name='ddt_histogram' size-in-bits='32768' is-struct='yes' visibility='default' id='type-id-112'>
+ <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='0bbec9cd'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='ddh_stat' type-id='type-id-113' visibility='default'/>
+ <var-decl name='st_dev' type-id='35ed8932' visibility='default'/>
</data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-109' size-in-bits='32768' id='type-id-113'>
- <subrange length='64' type-id='type-id-12' id='type-id-114'/>
-
- </array-type-def>
- <typedef-decl name='ddt_histogram_t' type-id='type-id-112' id='type-id-115'/>
- <qualified-type-def type-id='type-id-115' const='yes' id='type-id-116'/>
- <pointer-type-def type-id='type-id-116' size-in-bits='64' id='type-id-117'/>
- <function-decl name='zpool_dump_ddt' mangled-name='zpool_dump_ddt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_dump_ddt'>
- <parameter type-id='type-id-111' name='dds_total'/>
- <parameter type-id='type-id-117' name='ddh'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='realloc' mangled-name='realloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__builtin_putchar' mangled-name='putchar' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__builtin_puts' mangled-name='puts' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__printf_chk' mangled-name='__printf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zutil_device_path_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='is_mpath_whole_disk' mangled-name='is_mpath_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mpath_whole_disk'>
- <parameter type-id='type-id-4' name='path'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_path'>
- <parameter type-id='type-id-4' name='dev_name'/>
- <return type-id='type-id-36'/>
- </function-decl>
- <function-decl name='zfs_dev_is_whole_disk' mangled-name='zfs_dev_is_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_whole_disk'>
- <parameter type-id='type-id-4' name='dev_name'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='zfs_dev_is_dm' mangled-name='zfs_dev_is_dm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_dm'>
- <parameter type-id='type-id-4' name='dev_name'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_enclosure_sysfs_path'>
- <parameter type-id='type-id-4' name='dev_name'/>
- <return type-id='type-id-36'/>
- </function-decl>
- <function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_path'>
- <parameter type-id='type-id-36' name='path'/>
- <return type-id='type-id-36'/>
- </function-decl>
- <function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_partition'>
- <parameter type-id='type-id-36' name='path'/>
- <return type-id='type-id-36'/>
- </function-decl>
- <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_append_partition'>
- <parameter type-id='type-id-36' name='path'/>
- <parameter type-id='type-id-85' name='max_len'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='udev_device_get_property_value' mangled-name='udev_device_get_property_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_new' mangled-name='udev_new' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_device_new_from_subsystem_sysname' mangled-name='udev_device_new_from_subsystem_sysname' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_device_unref' mangled-name='udev_device_unref' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__realpath_alias' mangled-name='realpath' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__readlink_alias' mangled-name='readlink' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strstr' mangled-name='strstr' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zutil_import_os.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
- <function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_strs'>
- <parameter type-id='type-id-15' name='nv'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk_wait'>
- <parameter type-id='type-id-4' name='path'/>
- <parameter type-id='type-id-1' name='timeout_ms'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='type-id-118'/>
- <pointer-type-def type-id='type-id-118' size-in-bits='64' id='type-id-119'/>
- <function-decl name='zfs_device_get_devid' mangled-name='zfs_device_get_devid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_devid'>
- <parameter type-id='type-id-119' name='dev'/>
- <parameter type-id='type-id-36' name='bufptr'/>
- <parameter type-id='type-id-85' name='buflen'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-4' const='yes' id='type-id-120'/>
- <pointer-type-def type-id='type-id-120' size-in-bits='64' id='type-id-121'/>
- <pointer-type-def type-id='type-id-85' size-in-bits='64' id='type-id-122'/>
- <function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_default_search_paths'>
- <parameter type-id='type-id-122' name='count'/>
- <return type-id='type-id-121'/>
- </function-decl>
- <function-decl name='zfs_dev_flush' mangled-name='zfs_dev_flush' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_flush'>
- <parameter type-id='type-id-1' name='fd'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='zfs_device_get_physical' mangled-name='zfs_device_get_physical' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_physical'>
- <parameter type-id='type-id-119' name='dev'/>
- <parameter type-id='type-id-36' name='bufptr'/>
- <parameter type-id='type-id-85' name='buflen'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='strtoul' mangled-name='strtoul' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strncasecmp' mangled-name='strncasecmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='clock_gettime' mangled-name='clock_gettime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='sched_yield' mangled-name='sched_yield' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='usleep' mangled-name='usleep' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_unref' mangled-name='udev_unref' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_list_entry_get_name' mangled-name='udev_list_entry_get_name' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_device_get_devlinks_list_entry' mangled-name='udev_device_get_devlinks_list_entry' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_list_entry_get_next' mangled-name='udev_list_entry_get_next' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='udev_device_get_parent_with_subsystem_devtype' mangled-name='udev_device_get_parent_with_subsystem_devtype' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_get_cache' mangled-name='blkid_get_cache' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_probe_all_new' mangled-name='blkid_probe_all_new' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_dev_iterate_begin' mangled-name='blkid_dev_iterate_begin' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_dev_set_search' mangled-name='blkid_dev_set_search' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zutil_alloc' mangled-name='zutil_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_dev_next' mangled-name='blkid_dev_next' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_dev_devname' mangled-name='blkid_dev_devname' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zutil_strdup' mangled-name='zutil_strdup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_dev_iterate_end' mangled-name='blkid_dev_iterate_end' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='blkid_put_cache' mangled-name='blkid_put_cache' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='label_paths' mangled-name='label_paths' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='sscanf' mangled-name='sscanf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zutil_compat.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzutil' language='LANG_C99'>
- <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='type-id-123'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zc_name' type-id='type-id-124' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='st_ino' type-id='71288a47' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32768'>
- <var-decl name='zc_nvlist_src' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='st_nlink' type-id='80f0b9df' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32832'>
- <var-decl name='zc_nvlist_src_size' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='st_mode' type-id='e1c52942' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32896'>
- <var-decl name='zc_nvlist_dst' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='st_uid' type-id='cc5fcceb' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='32960'>
- <var-decl name='zc_nvlist_dst_size' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='st_gid' type-id='d94ec6d9' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='33024'>
- <var-decl name='zc_nvlist_dst_filled' type-id='type-id-23' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='288'>
+ <var-decl name='__pad0' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='33056'>
- <var-decl name='zc_pad2' type-id='type-id-1' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='st_rdev' type-id='35ed8932' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='33088'>
- <var-decl name='zc_history' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='st_size' type-id='79989e9c' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='33152'>
- <var-decl name='zc_value' type-id='type-id-125' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='st_blksize' type-id='d3f10a7f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='98688'>
- <var-decl name='zc_string' type-id='type-id-53' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='st_blocks' type-id='4e711bf1' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='100736'>
- <var-decl name='zc_guid' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='st_atim' type-id='a9c79a1f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='100800'>
- <var-decl name='zc_nvlist_conf' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='st_mtim' type-id='a9c79a1f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='100864'>
- <var-decl name='zc_nvlist_conf_size' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='st_ctim' type-id='a9c79a1f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='100928'>
- <var-decl name='zc_cookie' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='__glibc_reserved' type-id='083f8d58' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='100992'>
- <var-decl name='zc_objset_type' type-id='type-id-8' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='__dev_t' type-id='7359adad' id='35ed8932'/>
+ <typedef-decl name='__ino64_t' type-id='7359adad' id='71288a47'/>
+ <typedef-decl name='__nlink_t' type-id='7359adad' id='80f0b9df'/>
+ <typedef-decl name='__mode_t' type-id='f0981eeb' id='e1c52942'/>
+ <typedef-decl name='__uid_t' type-id='f0981eeb' id='cc5fcceb'/>
+ <typedef-decl name='__gid_t' type-id='f0981eeb' id='d94ec6d9'/>
+ <typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
+ <typedef-decl name='__blksize_t' type-id='bd54fe1a' id='d3f10a7f'/>
+ <typedef-decl name='__blkcnt64_t' type-id='bd54fe1a' id='4e711bf1'/>
+ <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='a9c79a1f'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='tv_sec' type-id='65eda9c0' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='101056'>
- <var-decl name='zc_perm_action' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='tv_nsec' type-id='03085adc' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='101120'>
- <var-decl name='zc_history_len' type-id='type-id-8' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='__time_t' type-id='bd54fe1a' id='65eda9c0'/>
+ <typedef-decl name='__syscall_slong_t' type-id='bd54fe1a' id='03085adc'/>
+ <typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='101184'>
- <var-decl name='zc_history_offset' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='101248'>
- <var-decl name='zc_obj' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='101312'>
- <var-decl name='zc_iflags' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='101376'>
- <var-decl name='zc_share' type-id='type-id-126' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='101632'>
- <var-decl name='zc_objset_stats' type-id='type-id-127' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='103936'>
- <var-decl name='zc_begin_record' type-id='type-id-40' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='106368'>
- <var-decl name='zc_inject_record' type-id='type-id-128' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109184'>
- <var-decl name='zc_defer_destroy' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109216'>
- <var-decl name='zc_flags' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109248'>
- <var-decl name='zc_action_handle' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109312'>
- <var-decl name='zc_cleanup_fd' type-id='type-id-1' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109344'>
- <var-decl name='zc_simple' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109352'>
- <var-decl name='zc_pad' type-id='type-id-74' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109376'>
- <var-decl name='zc_sendobj' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109440'>
- <var-decl name='zc_fromobj' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109504'>
- <var-decl name='zc_createtxg' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109568'>
- <var-decl name='zc_stat' type-id='type-id-129' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='109888'>
- <var-decl name='zc_zoneid' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='32768' id='type-id-124'>
- <subrange length='4096' type-id='type-id-12' id='type-id-130'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='65536' id='type-id-125'>
- <subrange length='8192' type-id='type-id-12' id='type-id-131'/>
-
- </array-type-def>
- <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-132'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='z_exportdata' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='z_sharedata' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='z_sharetype' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='z_sharemax' type-id='type-id-8' visibility='default'/>
- </data-member>
- </class-decl>
- <typedef-decl name='zfs_share_t' type-id='type-id-132' id='type-id-126'/>
- <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='type-id-133'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='dds_num_clones' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='dds_creation_txg' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='dds_guid' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='dds_type' type-id='type-id-52' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='dds_is_snapshot' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='232'>
- <var-decl name='dds_inconsistent' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='576'>
+ <var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='240'>
- <var-decl name='dds_redacted' type-id='type-id-33' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='640'>
+ <var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='248'>
- <var-decl name='dds_origin' type-id='type-id-53' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='dmu_objset_stats_t' type-id='type-id-133' id='type-id-127'/>
- <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='type-id-134'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zi_objset' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zi_object' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='_chain' type-id='dca988a5' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zi_start' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='896'>
+ <var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zi_end' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='928'>
+ <var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='zi_guid' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='960'>
+ <var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='zi_level' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1024'>
+ <var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='352'>
- <var-decl name='zi_error' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1040'>
+ <var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='zi_type' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1048'>
+ <var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='zi_freq' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1088'>
+ <var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='zi_failfast' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1152'>
+ <var-decl name='_offset' type-id='724e4de6' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='zi_func' type-id='type-id-53' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1216'>
+ <var-decl name='__pad1' type-id='eaa32e2f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2560'>
- <var-decl name='zi_iotype' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1280'>
+ <var-decl name='__pad2' type-id='eaa32e2f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2592'>
- <var-decl name='zi_duration' type-id='type-id-6' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1344'>
+ <var-decl name='__pad3' type-id='eaa32e2f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2624'>
- <var-decl name='zi_timer' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1408'>
+ <var-decl name='__pad4' type-id='eaa32e2f' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2688'>
- <var-decl name='zi_nlanes' type-id='type-id-8' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1472'>
+ <var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2752'>
- <var-decl name='zi_cmd' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1536'>
+ <var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='2784'>
- <var-decl name='zi_dvas' type-id='type-id-7' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='1568'>
+ <var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='zinject_record_t' type-id='type-id-134' id='type-id-128'/>
- <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-135'>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='010ae0b9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='zs_gen' type-id='type-id-8' visibility='default'/>
+ <var-decl name='_next' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='zs_mode' type-id='type-id-8' visibility='default'/>
+ <var-decl name='_sbuf' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='zs_links' type-id='type-id-8' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='zs_ctime' type-id='type-id-136' visibility='default'/>
+ <var-decl name='_pos' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-8' size-in-bits='128' id='type-id-136'>
- <subrange length='2' type-id='type-id-12' id='type-id-137'/>
-
- </array-type-def>
- <typedef-decl name='zfs_stat_t' type-id='type-id-135' id='type-id-129'/>
- <typedef-decl name='zfs_cmd_t' type-id='type-id-123' id='type-id-138'/>
- <pointer-type-def type-id='type-id-138' size-in-bits='64' id='type-id-139'/>
- <function-decl name='zfs_ioctl_fd' mangled-name='zfs_ioctl_fd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl_fd'>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-12' name='request'/>
- <parameter type-id='type-id-139' name='zc'/>
- <return type-id='type-id-1'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='../../module/avl/avl.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libavl' language='LANG_C99'>
- <class-decl name='avl_tree' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-140'>
+ <typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
+ <typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
+ <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='1b055409'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_root' type-id='type-id-141' visibility='default'/>
+ <var-decl name='mnt_special' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='avl_compar' type-id='type-id-142' visibility='default'/>
+ <var-decl name='mnt_mountp' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_offset' type-id='type-id-85' visibility='default'/>
+ <var-decl name='mnt_fstype' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='avl_numnodes' type-id='type-id-143' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='avl_pad' type-id='type-id-85' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='avl_node' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-144'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='avl_child' type-id='type-id-145' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='avl_pcb' type-id='type-id-146' visibility='default'/>
+ <var-decl name='mnt_mntopts' type-id='26a90f95' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-144' size-in-bits='64' id='type-id-141'/>
-
- <array-type-def dimensions='1' type-id='type-id-141' size-in-bits='128' id='type-id-145'>
- <subrange length='2' type-id='type-id-12' id='type-id-137'/>
-
- </array-type-def>
- <typedef-decl name='uintptr_t' type-id='type-id-12' id='type-id-146'/>
- <pointer-type-def type-id='type-id-147' size-in-bits='64' id='type-id-142'/>
- <typedef-decl name='ulong_t' type-id='type-id-12' id='type-id-143'/>
- <typedef-decl name='avl_tree_t' type-id='type-id-140' id='type-id-148'/>
- <pointer-type-def type-id='type-id-148' size-in-bits='64' id='type-id-149'/>
- <pointer-type-def type-id='type-id-89' size-in-bits='64' id='type-id-150'/>
- <function-decl name='avl_destroy_nodes' mangled-name='avl_destroy_nodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy_nodes'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-150' name='cookie'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='avl_is_empty' mangled-name='avl_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_is_empty'>
- <parameter type-id='type-id-149' name='tree'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='avl_numnodes' mangled-name='avl_numnodes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_numnodes'>
- <parameter type-id='type-id-149' name='tree'/>
- <return type-id='type-id-143'/>
- </function-decl>
- <function-decl name='avl_destroy' mangled-name='avl_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_destroy'>
- <parameter type-id='type-id-149' name='tree'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='avl_create' mangled-name='avl_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_create'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-142' name='compar'/>
- <parameter type-id='type-id-85' name='size'/>
- <parameter type-id='type-id-85' name='offset'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='avl_swap' mangled-name='avl_swap' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_swap'>
- <parameter type-id='type-id-149' name='tree1'/>
- <parameter type-id='type-id-149' name='tree2'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='avl_update' mangled-name='avl_update' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update'>
- <parameter type-id='type-id-149' name='t'/>
- <parameter type-id='type-id-89' name='obj'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='avl_update_gt' mangled-name='avl_update_gt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_gt'>
- <parameter type-id='type-id-149' name='t'/>
- <parameter type-id='type-id-89' name='obj'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='avl_update_lt' mangled-name='avl_update_lt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_update_lt'>
- <parameter type-id='type-id-149' name='t'/>
- <parameter type-id='type-id-89' name='obj'/>
- <return type-id='type-id-23'/>
- </function-decl>
- <function-decl name='avl_remove' mangled-name='avl_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_remove'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-89' name='data'/>
- <return type-id='type-id-84'/>
+ <pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
+ <pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
+ <pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
+ <pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
+ <pointer-type-def type-id='0c544dc0' size-in-bits='64' id='394fc496'/>
+ <pointer-type-def type-id='1b055409' size-in-bits='64' id='9d424d31'/>
+ <pointer-type-def type-id='0bbec9cd' size-in-bits='64' id='62f7a03d'/>
+ <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='394fc496' name='entry'/>
+ <parameter type-id='62f7a03d' name='statbuf'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='avl_add' mangled-name='avl_add' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_add'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-89' name='new_node'/>
- <return type-id='type-id-84'/>
+ <function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
+ <parameter type-id='822cd80b' name='fp'/>
+ <parameter type-id='9d424d31' name='mgetp'/>
+ <parameter type-id='9d424d31' name='mrefp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='avl_insert_here' mangled-name='avl_insert_here' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert_here'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-89' name='new_data'/>
- <parameter type-id='type-id-89' name='here'/>
- <parameter type-id='type-id-1' name='direction'/>
- <return type-id='type-id-84'/>
+ <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
+ <parameter type-id='822cd80b' name='fp'/>
+ <parameter type-id='9d424d31' name='mgetp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <typedef-decl name='avl_index_t' type-id='type-id-146' id='type-id-151'/>
- <function-decl name='avl_insert' mangled-name='avl_insert' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_insert'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-89' name='new_data'/>
- <parameter type-id='type-id-151' name='where'/>
- <return type-id='type-id-84'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' language='LANG_C99'>
+ <typedef-decl name='zoneid_t' type-id='95e97e5e' id='4da03624'/>
+ <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
+ <return type-id='4da03624'/>
</function-decl>
- <pointer-type-def type-id='type-id-151' size-in-bits='64' id='type-id-152'/>
- <function-decl name='avl_find' mangled-name='avl_find' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_find'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-89' name='value'/>
- <parameter type-id='type-id-152' name='where'/>
- <return type-id='type-id-89'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='page.c' language='LANG_C99'>
+ <function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
+ <return type-id='b59d7dce'/>
</function-decl>
- <function-decl name='avl_nearest' mangled-name='avl_nearest' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_nearest'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-151' name='where'/>
- <parameter type-id='type-id-1' name='direction'/>
- <return type-id='type-id-89'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='strlcat.c' language='LANG_C99'>
+ <function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
+ <parameter type-id='26a90f95' name='dst'/>
+ <parameter type-id='80f4b756' name='src'/>
+ <parameter type-id='b59d7dce' name='dstsize'/>
+ <return type-id='b59d7dce'/>
</function-decl>
- <function-decl name='avl_last' mangled-name='avl_last' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_last'>
- <parameter type-id='type-id-149' name='tree'/>
- <return type-id='type-id-89'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='strlcpy.c' language='LANG_C99'>
+ <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
+ <parameter type-id='26a90f95' name='dst'/>
+ <parameter type-id='80f4b756' name='src'/>
+ <parameter type-id='b59d7dce' name='len'/>
+ <return type-id='b59d7dce'/>
</function-decl>
- <function-decl name='avl_first' mangled-name='avl_first' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_first'>
- <parameter type-id='type-id-149' name='tree'/>
- <return type-id='type-id-89'/>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='timestamp.c' language='LANG_C99'>
+ <function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
+ <parameter type-id='3502e3ff' name='timestamp_fmt'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='avl_walk' mangled-name='avl_walk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='avl_walk'>
- <parameter type-id='type-id-149' name='tree'/>
- <parameter type-id='type-id-89' name='oldnode'/>
- <parameter type-id='type-id-1' name='left'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-147'>
- <parameter type-id='type-id-89'/>
- <parameter type-id='type-id-89'/>
- <return type-id='type-id-1'/>
- </function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='thread_pool.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libtpool' language='LANG_C99'>
- <class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' id='type-id-153'>
+ <abi-instr version='1.0' address-size='64' path='thread_pool.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='320' id='36c46961'>
+ <subrange length='40' type-id='7359adad' id='8f80b239'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='384' id='36d7f119'>
+ <subrange length='48' type-id='7359adad' id='8f6d2a81'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='448' id='6093ff7c'>
+ <subrange length='56' type-id='7359adad' id='f8137894'/>
+ </array-type-def>
+ <type-decl name='long long int' size-in-bits='64' id='1eb56b1e'/>
+ <type-decl name='long long unsigned int' size-in-bits='64' id='3a47d82b'/>
+ <type-decl name='short int' size-in-bits='16' id='a2185560'/>
+ <array-type-def dimensions='1' type-id='f0981eeb' size-in-bits='64' id='0d532ec1'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <class-decl name='tpool' size-in-bits='2496' is-struct='yes' visibility='default' id='88d1b7f9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tp_forw' type-id='type-id-154' visibility='default'/>
+ <var-decl name='tp_forw' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tp_back' type-id='type-id-154' visibility='default'/>
+ <var-decl name='tp_back' type-id='9cf59a50' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='tp_mutex' type-id='type-id-155' visibility='default'/>
+ <var-decl name='tp_mutex' type-id='7a6844eb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='tp_busycv' type-id='type-id-156' visibility='default'/>
+ <var-decl name='tp_busycv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='tp_workcv' type-id='type-id-156' visibility='default'/>
+ <var-decl name='tp_workcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='tp_waitcv' type-id='type-id-156' visibility='default'/>
+ <var-decl name='tp_waitcv' type-id='62fab762' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1600'>
- <var-decl name='tp_active' type-id='type-id-157' visibility='default'/>
+ <var-decl name='tp_active' type-id='ad33e5e7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1664'>
- <var-decl name='tp_head' type-id='type-id-158' visibility='default'/>
+ <var-decl name='tp_head' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1728'>
- <var-decl name='tp_tail' type-id='type-id-158' visibility='default'/>
+ <var-decl name='tp_tail' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1792'>
- <var-decl name='tp_attr' type-id='type-id-159' visibility='default'/>
+ <var-decl name='tp_attr' type-id='7d8569fd' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2240'>
- <var-decl name='tp_flags' type-id='type-id-1' visibility='default'/>
+ <var-decl name='tp_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2272'>
- <var-decl name='tp_linger' type-id='type-id-35' visibility='default'/>
+ <var-decl name='tp_linger' type-id='3502e3ff' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2304'>
- <var-decl name='tp_njobs' type-id='type-id-1' visibility='default'/>
+ <var-decl name='tp_njobs' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2336'>
- <var-decl name='tp_minimum' type-id='type-id-1' visibility='default'/>
+ <var-decl name='tp_minimum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2368'>
- <var-decl name='tp_maximum' type-id='type-id-1' visibility='default'/>
+ <var-decl name='tp_maximum' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2400'>
- <var-decl name='tp_current' type-id='type-id-1' visibility='default'/>
+ <var-decl name='tp_current' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='2432'>
- <var-decl name='tp_idle' type-id='type-id-1' visibility='default'/>
+ <var-decl name='tp_idle' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='tpool_t' type-id='type-id-153' id='type-id-160'/>
- <pointer-type-def type-id='type-id-160' size-in-bits='64' id='type-id-154'/>
- <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='type-id-161'>
+ <typedef-decl name='tpool_t' type-id='88d1b7f9' id='b1bbf10d'/>
+ <typedef-decl name='pthread_mutex_t' type-id='c4794498' id='7a6844eb'/>
+ <union-decl name='__anonymous_union__' size-in-bits='320' is-anonymous='yes' visibility='default' id='c4794498'>
<data-member access='private'>
- <var-decl name='__data' type-id='type-id-162' visibility='default'/>
+ <var-decl name='__data' type-id='4c734837' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__size' type-id='type-id-163' visibility='default'/>
+ <var-decl name='__size' type-id='36c46961' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__align' type-id='type-id-86' visibility='default'/>
+ <var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-162'>
+ <class-decl name='__pthread_mutex_s' size-in-bits='320' is-struct='yes' visibility='default' id='4c734837'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__lock' type-id='type-id-1' visibility='default'/>
+ <var-decl name='__lock' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__count' type-id='type-id-10' visibility='default'/>
+ <var-decl name='__count' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__owner' type-id='type-id-1' visibility='default'/>
+ <var-decl name='__owner' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='__nusers' type-id='type-id-10' visibility='default'/>
+ <var-decl name='__nusers' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__kind' type-id='type-id-1' visibility='default'/>
+ <var-decl name='__kind' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='160'>
- <var-decl name='__spins' type-id='type-id-164' visibility='default'/>
+ <var-decl name='__spins' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='176'>
- <var-decl name='__elision' type-id='type-id-164' visibility='default'/>
+ <var-decl name='__elision' type-id='a2185560' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__list' type-id='type-id-165' visibility='default'/>
+ <var-decl name='__list' type-id='518fb49c' visibility='default'/>
</data-member>
</class-decl>
- <type-decl name='short int' size-in-bits='16' id='type-id-164'/>
- <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-166'>
+ <typedef-decl name='__pthread_list_t' type-id='0e01899c' id='518fb49c'/>
+ <class-decl name='__pthread_internal_list' size-in-bits='128' is-struct='yes' visibility='default' id='0e01899c'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__prev' type-id='type-id-167' visibility='default'/>
+ <var-decl name='__prev' type-id='4d98cd5a' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='__next' type-id='type-id-167' visibility='default'/>
+ <var-decl name='__next' type-id='4d98cd5a' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-166' size-in-bits='64' id='type-id-167'/>
- <typedef-decl name='__pthread_list_t' type-id='type-id-166' id='type-id-165'/>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='320' id='type-id-163'>
- <subrange length='40' type-id='type-id-12' id='type-id-168'/>
-
- </array-type-def>
- <typedef-decl name='pthread_mutex_t' type-id='type-id-161' id='type-id-155'/>
- <union-decl name='__anonymous_union__' size-in-bits='384' is-anonymous='yes' visibility='default' id='type-id-169'>
+ <typedef-decl name='pthread_cond_t' type-id='be6ed7a7' id='62fab762'/>
+ <union-decl name='__anonymous_union__1' size-in-bits='384' is-anonymous='yes' visibility='default' id='be6ed7a7'>
<data-member access='private'>
- <var-decl name='__data' type-id='type-id-170' visibility='default'/>
+ <var-decl name='__data' type-id='c987b47c' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__size' type-id='type-id-171' visibility='default'/>
+ <var-decl name='__size' type-id='36d7f119' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__align' type-id='type-id-172' visibility='default'/>
+ <var-decl name='__align' type-id='1eb56b1e' visibility='default'/>
</data-member>
</union-decl>
- <class-decl name='__pthread_cond_s' size-in-bits='384' is-struct='yes' visibility='default' id='type-id-170'>
+ <class-decl name='__pthread_cond_s' size-in-bits='384' is-struct='yes' visibility='default' id='c987b47c'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='' type-id='type-id-173' visibility='default'/>
+ <var-decl name='' type-id='2516de83' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='' type-id='fc82e0c5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='__g_refs' type-id='type-id-174' visibility='default'/>
+ <var-decl name='__g_refs' type-id='0d532ec1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='__g_size' type-id='type-id-174' visibility='default'/>
+ <var-decl name='__g_size' type-id='0d532ec1' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='__g1_orig_size' type-id='type-id-10' visibility='default'/>
+ <var-decl name='__g1_orig_size' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='__wrefs' type-id='type-id-10' visibility='default'/>
+ <var-decl name='__wrefs' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='__g_signals' type-id='type-id-174' visibility='default'/>
+ <var-decl name='__g_signals' type-id='0d532ec1' visibility='default'/>
</data-member>
</class-decl>
- <union-decl name='__anonymous_union__' size-in-bits='64' is-anonymous='yes' visibility='default' id='type-id-173'>
+ <union-decl name='__anonymous_union__2' size-in-bits='64' is-anonymous='yes' visibility='default' id='2516de83'>
<data-member access='private'>
- <var-decl name='__wseq' type-id='type-id-175' visibility='default'/>
+ <var-decl name='__wseq' type-id='3a47d82b' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__wseq32' type-id='type-id-176' visibility='default'/>
+ <var-decl name='__wseq32' type-id='2e971cfd' visibility='default'/>
</data-member>
</union-decl>
- <type-decl name='long long unsigned int' size-in-bits='64' id='type-id-175'/>
- <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='type-id-176'>
+ <class-decl name='__anonymous_struct__' size-in-bits='64' is-struct='yes' is-anonymous='yes' visibility='default' id='2e971cfd'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='__low' type-id='type-id-10' visibility='default'/>
+ <var-decl name='__low' type-id='f0981eeb' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='__high' type-id='type-id-10' visibility='default'/>
+ <var-decl name='__high' type-id='f0981eeb' visibility='default'/>
</data-member>
</class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-10' size-in-bits='64' id='type-id-174'>
- <subrange length='2' type-id='type-id-12' id='type-id-137'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='384' id='type-id-171'>
- <subrange length='48' type-id='type-id-12' id='type-id-177'/>
-
- </array-type-def>
- <type-decl name='long long int' size-in-bits='64' id='type-id-172'/>
- <typedef-decl name='pthread_cond_t' type-id='type-id-169' id='type-id-156'/>
- <class-decl name='tpool_active' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-178'>
+ <union-decl name='__anonymous_union__3' size-in-bits='64' is-anonymous='yes' visibility='default' id='fc82e0c5'>
+ <data-member access='private'>
+ <var-decl name='__g1_start' type-id='3a47d82b' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='__g1_start32' type-id='2e971cfd' visibility='default'/>
+ </data-member>
+ </union-decl>
+ <class-decl name='tpool_active' size-in-bits='128' is-struct='yes' visibility='default' id='c8d086f4'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tpa_next' type-id='type-id-157' visibility='default'/>
+ <var-decl name='tpa_next' type-id='ad33e5e7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tpa_tid' type-id='type-id-179' visibility='default'/>
+ <var-decl name='tpa_tid' type-id='4051f5e7' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='tpool_active_t' type-id='type-id-178' id='type-id-180'/>
- <pointer-type-def type-id='type-id-180' size-in-bits='64' id='type-id-157'/>
- <typedef-decl name='pthread_t' type-id='type-id-12' id='type-id-179'/>
- <class-decl name='tpool_job' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-181'>
+ <typedef-decl name='tpool_active_t' type-id='c8d086f4' id='6fcda10e'/>
+ <typedef-decl name='pthread_t' type-id='7359adad' id='4051f5e7'/>
+ <class-decl name='tpool_job' size-in-bits='192' is-struct='yes' visibility='default' id='3b8579e5'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tpj_next' type-id='type-id-158' visibility='default'/>
+ <var-decl name='tpj_next' type-id='f32b30e4' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tpj_func' type-id='type-id-182' visibility='default'/>
+ <var-decl name='tpj_func' type-id='b7f9d8e6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='tpj_arg' type-id='type-id-89' visibility='default'/>
+ <var-decl name='tpj_arg' type-id='eaa32e2f' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='tpool_job_t' type-id='type-id-181' id='type-id-183'/>
- <pointer-type-def type-id='type-id-183' size-in-bits='64' id='type-id-158'/>
- <pointer-type-def type-id='type-id-184' size-in-bits='64' id='type-id-182'/>
- <union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='type-id-185'>
+ <typedef-decl name='tpool_job_t' type-id='3b8579e5' id='66a0afc9'/>
+ <typedef-decl name='pthread_attr_t' type-id='b63afacd' id='7d8569fd'/>
+ <union-decl name='pthread_attr_t' size-in-bits='448' visibility='default' id='b63afacd'>
<data-member access='private'>
- <var-decl name='__size' type-id='type-id-186' visibility='default'/>
+ <var-decl name='__size' type-id='6093ff7c' visibility='default'/>
</data-member>
<data-member access='private'>
- <var-decl name='__align' type-id='type-id-86' visibility='default'/>
+ <var-decl name='__align' type-id='bd54fe1a' visibility='default'/>
</data-member>
</union-decl>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='448' id='type-id-186'>
- <subrange length='56' type-id='type-id-12' id='type-id-187'/>
-
- </array-type-def>
- <typedef-decl name='pthread_attr_t' type-id='type-id-185' id='type-id-159'/>
+ <pointer-type-def type-id='0e01899c' size-in-bits='64' id='4d98cd5a'/>
+ <pointer-type-def type-id='7d8569fd' size-in-bits='64' id='7347a39e'/>
+ <pointer-type-def type-id='6fcda10e' size-in-bits='64' id='ad33e5e7'/>
+ <pointer-type-def type-id='66a0afc9' size-in-bits='64' id='f32b30e4'/>
+ <pointer-type-def type-id='b1bbf10d' size-in-bits='64' id='9cf59a50'/>
+ <pointer-type-def type-id='c5c76c9c' size-in-bits='64' id='b7f9d8e6'/>
<function-decl name='tpool_member' mangled-name='tpool_member' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_member'>
- <parameter type-id='type-id-154' name='tpool'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_resume' mangled-name='tpool_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_resume'>
- <parameter type-id='type-id-154' name='tpool'/>
- <return type-id='type-id-84'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_suspended' mangled-name='tpool_suspended' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspended'>
- <parameter type-id='type-id-154' name='tpool'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <return type-id='95e97e5e'/>
</function-decl>
<function-decl name='tpool_suspend' mangled-name='tpool_suspend' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_suspend'>
- <parameter type-id='type-id-154' name='tpool'/>
- <return type-id='type-id-84'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_wait' mangled-name='tpool_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_wait'>
- <parameter type-id='type-id-154' name='tpool'/>
- <return type-id='type-id-84'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_abandon' mangled-name='tpool_abandon' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_abandon'>
- <parameter type-id='type-id-154' name='tpool'/>
- <return type-id='type-id-84'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_destroy' mangled-name='tpool_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_destroy'>
- <parameter type-id='type-id-154' name='tpool'/>
- <return type-id='type-id-84'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='tpool_dispatch' mangled-name='tpool_dispatch' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_dispatch'>
- <parameter type-id='type-id-154' name='tpool'/>
- <parameter type-id='type-id-182' name='func'/>
- <parameter type-id='type-id-89' name='arg'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='9cf59a50' name='tpool'/>
+ <parameter type-id='b7f9d8e6' name='func'/>
+ <parameter type-id='eaa32e2f' name='arg'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <pointer-type-def type-id='type-id-159' size-in-bits='64' id='type-id-188'/>
<function-decl name='tpool_create' mangled-name='tpool_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='tpool_create'>
- <parameter type-id='type-id-35' name='min_threads'/>
- <parameter type-id='type-id-35' name='max_threads'/>
- <parameter type-id='type-id-35' name='linger'/>
- <parameter type-id='type-id-188' name='attr'/>
- <return type-id='type-id-154'/>
- </function-decl>
- <function-decl name='pthread_self' mangled-name='pthread_self' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_cond_broadcast' mangled-name='pthread_cond_broadcast' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__sigsetjmp' mangled-name='__sigsetjmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__pthread_register_cancel' mangled-name='__pthread_register_cancel' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_cond_wait' mangled-name='pthread_cond_wait' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__pthread_unregister_cancel' mangled-name='__pthread_unregister_cancel' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__pthread_unwind_next' mangled-name='__pthread_unwind_next' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_cancel' mangled-name='pthread_cancel' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_cond_signal' mangled-name='pthread_cond_signal' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_init' mangled-name='pthread_attr_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getaffinity_np' mangled-name='pthread_attr_getaffinity_np' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_destroy' mangled-name='pthread_attr_destroy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setaffinity_np' mangled-name='pthread_attr_setaffinity_np' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getdetachstate' mangled-name='pthread_attr_getdetachstate' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setdetachstate' mangled-name='pthread_attr_setdetachstate' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getguardsize' mangled-name='pthread_attr_getguardsize' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setguardsize' mangled-name='pthread_attr_setguardsize' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getinheritsched' mangled-name='pthread_attr_getinheritsched' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setinheritsched' mangled-name='pthread_attr_setinheritsched' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getschedparam' mangled-name='pthread_attr_getschedparam' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setschedparam' mangled-name='pthread_attr_setschedparam' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getschedpolicy' mangled-name='pthread_attr_getschedpolicy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setschedpolicy' mangled-name='pthread_attr_setschedpolicy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getscope' mangled-name='pthread_attr_getscope' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setscope' mangled-name='pthread_attr_setscope' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_getstack' mangled-name='pthread_attr_getstack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_attr_setstack' mangled-name='pthread_attr_setstack' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_cond_init' mangled-name='pthread_cond_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_sigmask' mangled-name='pthread_sigmask' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_create' mangled-name='pthread_create' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_cond_timedwait' mangled-name='pthread_cond_timedwait' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_setcanceltype' mangled-name='pthread_setcanceltype' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='pthread_setcancelstate' mangled-name='pthread_setcancelstate' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-type size-in-bits='64' id='type-id-184'>
- <parameter type-id='type-id-89'/>
- <return type-id='type-id-84'/>
+ <parameter type-id='3502e3ff' name='min_threads'/>
+ <parameter type-id='3502e3ff' name='max_threads'/>
+ <parameter type-id='3502e3ff' name='linger'/>
+ <parameter type-id='7347a39e' name='attr'/>
+ <return type-id='9cf59a50'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='c5c76c9c'>
+ <parameter type-id='eaa32e2f'/>
+ <return type-id='48b5725f'/>
</function-type>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='assert.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <var-decl name='libspl_assert_ok' type-id='type-id-1' mangled-name='libspl_assert_ok' visibility='default' elf-symbol-id='libspl_assert_ok'/>
- <function-decl name='libspl_assertf' mangled-name='libspl_assertf' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libspl_assertf'>
- <parameter type-id='type-id-4' name='file'/>
- <parameter type-id='type-id-4' name='func'/>
- <parameter type-id='type-id-1' name='line'/>
- <parameter type-id='type-id-4' name='format'/>
- <parameter is-variadic='yes'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__vfprintf_chk' mangled-name='__vfprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__builtin_fputc' mangled-name='fputc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='abort' mangled-name='abort' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='atomic.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='membar_consumer' mangled-name='membar_consumer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_consumer'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='membar_producer' mangled-name='membar_producer' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_producer'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='membar_enter' mangled-name='membar_enter' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='membar_enter'>
- <return type-id='type-id-84'/>
- </function-decl>
- <qualified-type-def type-id='type-id-143' volatile='yes' id='type-id-189'/>
- <pointer-type-def type-id='type-id-189' size-in-bits='64' id='type-id-190'/>
- <function-decl name='atomic_clear_long_excl' mangled-name='atomic_clear_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_clear_long_excl'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-35' name='value'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='atomic_set_long_excl' mangled-name='atomic_set_long_excl' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_set_long_excl'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-35' name='value'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <qualified-type-def type-id='type-id-84' volatile='yes' id='type-id-191'/>
- <pointer-type-def type-id='type-id-191' size-in-bits='64' id='type-id-192'/>
- <function-decl name='atomic_swap_ptr' mangled-name='atomic_swap_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ptr'>
- <parameter type-id='type-id-192' name='target'/>
- <parameter type-id='type-id-89' name='bits'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='atomic_swap_ulong' mangled-name='atomic_swap_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_ulong'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-143' name='bits'/>
- <return type-id='type-id-143'/>
- </function-decl>
- <qualified-type-def type-id='type-id-7' volatile='yes' id='type-id-193'/>
- <pointer-type-def type-id='type-id-193' size-in-bits='64' id='type-id-194'/>
- <function-decl name='atomic_swap_32' mangled-name='atomic_swap_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_32'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-195'/>
- <typedef-decl name='__uint16_t' type-id='type-id-195' id='type-id-196'/>
- <typedef-decl name='uint16_t' type-id='type-id-196' id='type-id-197'/>
- <qualified-type-def type-id='type-id-197' volatile='yes' id='type-id-198'/>
- <pointer-type-def type-id='type-id-198' size-in-bits='64' id='type-id-199'/>
- <function-decl name='atomic_swap_16' mangled-name='atomic_swap_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_16'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-197' name='bits'/>
- <return type-id='type-id-197'/>
- </function-decl>
- <qualified-type-def type-id='type-id-33' volatile='yes' id='type-id-200'/>
- <pointer-type-def type-id='type-id-200' size-in-bits='64' id='type-id-201'/>
- <function-decl name='atomic_swap_8' mangled-name='atomic_swap_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_swap_8'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-33' name='bits'/>
- <return type-id='type-id-33'/>
- </function-decl>
- <function-decl name='atomic_cas_ptr' mangled-name='atomic_cas_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ptr'>
- <parameter type-id='type-id-192' name='target'/>
- <parameter type-id='type-id-89' name='exp'/>
- <parameter type-id='type-id-89' name='des'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='atomic_and_ulong_nv' mangled-name='atomic_and_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong_nv'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-143' name='bits'/>
- <return type-id='type-id-143'/>
- </function-decl>
- <function-decl name='atomic_and_32_nv' mangled-name='atomic_and_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32_nv'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='atomic_and_16_nv' mangled-name='atomic_and_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16_nv'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-197' name='bits'/>
- <return type-id='type-id-197'/>
- </function-decl>
- <function-decl name='atomic_and_8_nv' mangled-name='atomic_and_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8_nv'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-33' name='bits'/>
- <return type-id='type-id-33'/>
- </function-decl>
- <function-decl name='atomic_or_ulong_nv' mangled-name='atomic_or_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong_nv'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-143' name='bits'/>
- <return type-id='type-id-143'/>
- </function-decl>
- <function-decl name='atomic_or_32_nv' mangled-name='atomic_or_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32_nv'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <function-decl name='atomic_or_16_nv' mangled-name='atomic_or_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16_nv'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-197' name='bits'/>
- <return type-id='type-id-197'/>
- </function-decl>
- <function-decl name='atomic_or_8_nv' mangled-name='atomic_or_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8_nv'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-33' name='bits'/>
- <return type-id='type-id-33'/>
- </function-decl>
- <function-decl name='atomic_sub_ptr_nv' mangled-name='atomic_sub_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr_nv'>
- <parameter type-id='type-id-192' name='target'/>
- <parameter type-id='type-id-88' name='bits'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='atomic_sub_long_nv' mangled-name='atomic_sub_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long_nv'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-86' name='bits'/>
- <return type-id='type-id-143'/>
- </function-decl>
- <function-decl name='atomic_sub_32_nv' mangled-name='atomic_sub_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32_nv'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-6' name='bits'/>
- <return type-id='type-id-7'/>
- </function-decl>
- <typedef-decl name='__int16_t' type-id='type-id-164' id='type-id-202'/>
- <typedef-decl name='int16_t' type-id='type-id-202' id='type-id-203'/>
- <function-decl name='atomic_sub_16_nv' mangled-name='atomic_sub_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16_nv'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-203' name='bits'/>
- <return type-id='type-id-197'/>
- </function-decl>
- <type-decl name='signed char' size-in-bits='8' id='type-id-204'/>
- <typedef-decl name='__int8_t' type-id='type-id-204' id='type-id-205'/>
- <typedef-decl name='int8_t' type-id='type-id-205' id='type-id-206'/>
- <function-decl name='atomic_sub_8_nv' mangled-name='atomic_sub_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8_nv'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-206' name='bits'/>
- <return type-id='type-id-33'/>
- </function-decl>
- <function-decl name='atomic_add_ptr_nv' mangled-name='atomic_add_ptr_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr_nv'>
- <parameter type-id='type-id-192' name='target'/>
- <parameter type-id='type-id-88' name='bits'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='atomic_add_long_nv' mangled-name='atomic_add_long_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long_nv'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-86' name='bits'/>
- <return type-id='type-id-143'/>
+ <abi-instr version='1.0' address-size='64' path='libzfs_core.c' language='LANG_C99'>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='2048' id='d1617432'>
+ <subrange length='256' type-id='7359adad' id='36e5b9fa'/>
+ </array-type-def>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='2176' id='8c2bcad1'>
+ <subrange length='34' type-id='7359adad' id='6a6a7e00'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='256' id='85c64d26'>
+ <subrange length='4' type-id='7359adad' id='16fe7105'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='96' id='fa8ef949'>
+ <subrange length='12' type-id='7359adad' id='84827bdc'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='128' id='fa9986a5'>
+ <subrange length='16' type-id='7359adad' id='848d0938'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='24' id='d3490169'>
+ <subrange length='3' type-id='7359adad' id='56f209d2'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='40' id='0f4ddd0b'>
+ <subrange length='5' type-id='7359adad' id='53010e10'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='48' id='0f562bd0'>
+ <subrange length='6' type-id='7359adad' id='52fa524b'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='b96825af' size-in-bits='64' id='13339fda'>
+ <subrange length='8' type-id='7359adad' id='56e0c0b1'/>
+ </array-type-def>
+ <type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
+ <type-decl name='unsigned char' size-in-bits='8' id='002ac4a6'/>
+ <type-decl name='unsigned int' size-in-bits='32' id='f0981eeb'/>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <type-decl name='void' id='48b5725f'/>
+ <typedef-decl name='nvlist_t' type-id='ac266fd9' id='8e8d4be3'/>
+ <class-decl name='nvlist' size-in-bits='192' is-struct='yes' visibility='default' id='ac266fd9'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='nvl_version' type-id='3ff5601b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='nvl_nvflag' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='nvl_priv' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='nvl_flag' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='nvl_pad' type-id='3ff5601b' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='int32_t' type-id='33f57a65' id='3ff5601b'/>
+ <typedef-decl name='__int32_t' type-id='95e97e5e' id='33f57a65'/>
+ <typedef-decl name='uint32_t' type-id='62f1140c' id='8f92235e'/>
+ <typedef-decl name='__uint32_t' type-id='f0981eeb' id='62f1140c'/>
+ <typedef-decl name='uint64_t' type-id='8910171f' id='9c313c2d'/>
+ <typedef-decl name='__uint64_t' type-id='7359adad' id='8910171f'/>
+ <typedef-decl name='zfs_wait_activity_t' type-id='08f5ca17' id='3024501a'/>
+ <enum-decl name='__anonymous_enum__' is-anonymous='yes' id='08f5ca17'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_WAIT_DELETEQ' value='0'/>
+ <enumerator name='ZFS_WAIT_NUM_ACTIVITIES' value='1'/>
+ </enum-decl>
+ <typedef-decl name='boolean_t' type-id='40ed39d2' id='c19b74c3'/>
+ <enum-decl name='__anonymous_enum__1' is-anonymous='yes' id='40ed39d2'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='B_FALSE' value='0'/>
+ <enumerator name='B_TRUE' value='1'/>
+ </enum-decl>
+ <typedef-decl name='zpool_wait_activity_t' type-id='3fed383f' id='73446457'/>
+ <enum-decl name='__anonymous_enum__2' is-anonymous='yes' id='3fed383f'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZPOOL_WAIT_CKPT_DISCARD' value='0'/>
+ <enumerator name='ZPOOL_WAIT_FREE' value='1'/>
+ <enumerator name='ZPOOL_WAIT_INITIALIZE' value='2'/>
+ <enumerator name='ZPOOL_WAIT_REPLACE' value='3'/>
+ <enumerator name='ZPOOL_WAIT_REMOVE' value='4'/>
+ <enumerator name='ZPOOL_WAIT_RESILVER' value='5'/>
+ <enumerator name='ZPOOL_WAIT_SCRUB' value='6'/>
+ <enumerator name='ZPOOL_WAIT_TRIM' value='7'/>
+ <enumerator name='ZPOOL_WAIT_NUM_ACTIVITIES' value='8'/>
+ </enum-decl>
+ <typedef-decl name='pool_trim_func_t' type-id='54ed608a' id='b1146b8d'/>
+ <enum-decl name='pool_trim_func' id='54ed608a'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='POOL_TRIM_START' value='0'/>
+ <enumerator name='POOL_TRIM_CANCEL' value='1'/>
+ <enumerator name='POOL_TRIM_SUSPEND' value='2'/>
+ <enumerator name='POOL_TRIM_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='pool_initialize_func_t' type-id='5c246ad4' id='7063e1ab'/>
+ <enum-decl name='pool_initialize_func' id='5c246ad4'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='POOL_INITIALIZE_START' value='0'/>
+ <enumerator name='POOL_INITIALIZE_CANCEL' value='1'/>
+ <enumerator name='POOL_INITIALIZE_SUSPEND' value='2'/>
+ <enumerator name='POOL_INITIALIZE_FUNCS' value='3'/>
+ </enum-decl>
+ <typedef-decl name='uint8_t' type-id='c51d6389' id='b96825af'/>
+ <typedef-decl name='__uint8_t' type-id='002ac4a6' id='c51d6389'/>
+ <typedef-decl name='uint_t' type-id='f0981eeb' id='3502e3ff'/>
+ <typedef-decl name='dmu_replay_record_t' type-id='781a52d7' id='8b8fc893'/>
+ <class-decl name='dmu_replay_record' size-in-bits='2496' is-struct='yes' visibility='default' id='781a52d7'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_type' type-id='3eed36ac' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='32'>
+ <var-decl name='drr_payloadlen' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_u' type-id='edc8c94a' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <enum-decl name='__anonymous_enum__3' is-anonymous='yes' id='3eed36ac'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='DRR_BEGIN' value='0'/>
+ <enumerator name='DRR_OBJECT' value='1'/>
+ <enumerator name='DRR_FREEOBJECTS' value='2'/>
+ <enumerator name='DRR_WRITE' value='3'/>
+ <enumerator name='DRR_FREE' value='4'/>
+ <enumerator name='DRR_END' value='5'/>
+ <enumerator name='DRR_WRITE_BYREF' value='6'/>
+ <enumerator name='DRR_SPILL' value='7'/>
+ <enumerator name='DRR_WRITE_EMBEDDED' value='8'/>
+ <enumerator name='DRR_OBJECT_RANGE' value='9'/>
+ <enumerator name='DRR_REDACT' value='10'/>
+ <enumerator name='DRR_NUMTYPES' value='11'/>
+ </enum-decl>
+ <union-decl name='__anonymous_union__' size-in-bits='2432' is-anonymous='yes' visibility='default' id='edc8c94a'>
+ <data-member access='private'>
+ <var-decl name='drr_begin' type-id='09fcdc01' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_end' type-id='6ee25631' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_object' type-id='f9ad530b' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_freeobjects' type-id='a27d958e' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_write' type-id='4cc69e4b' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_free' type-id='c836cfd2' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_write_byref' type-id='e511cdce' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_spill' type-id='1e69a80a' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_write_embedded' type-id='98b1345e' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_object_range' type-id='aba1f9e1' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_redact' type-id='50389039' visibility='default'/>
+ </data-member>
+ <data-member access='private'>
+ <var-decl name='drr_checksum' type-id='a5fe3647' visibility='default'/>
+ </data-member>
+ </union-decl>
+ <class-decl name='drr_begin' size-in-bits='2432' is-struct='yes' visibility='default' id='09fcdc01'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_magic' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_versioninfo' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_creation_time' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_type' type-id='230f1e16' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='drr_flags' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_fromguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='drr_toname' type-id='d1617432' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='dmu_objset_type_t' type-id='6b1b19f9' id='230f1e16'/>
+ <enum-decl name='dmu_objset_type' id='6b1b19f9'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='DMU_OST_NONE' value='0'/>
+ <enumerator name='DMU_OST_META' value='1'/>
+ <enumerator name='DMU_OST_ZFS' value='2'/>
+ <enumerator name='DMU_OST_ZVOL' value='3'/>
+ <enumerator name='DMU_OST_OTHER' value='4'/>
+ <enumerator name='DMU_OST_ANY' value='5'/>
+ <enumerator name='DMU_OST_NUMTYPES' value='6'/>
+ </enum-decl>
+ <class-decl name='drr_end' size-in-bits='320' is-struct='yes' visibility='default' id='6ee25631'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_checksum' type-id='39730d0b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='zio_cksum_t' type-id='1d53e28b' id='39730d0b'/>
+ <class-decl name='zio_cksum' size-in-bits='256' is-struct='yes' visibility='default' id='1d53e28b'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zc_word' type-id='85c64d26' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_object' size-in-bits='448' is-struct='yes' visibility='default' id='f9ad530b'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='drr_bonustype' type-id='5c9d8906' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_blksz' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='160'>
+ <var-decl name='drr_bonuslen' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='200'>
+ <var-decl name='drr_compress' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='208'>
+ <var-decl name='drr_dn_slots' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='216'>
+ <var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='drr_raw_bonuslen' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_indblkshift' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='328'>
+ <var-decl name='drr_nlevels' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='336'>
+ <var-decl name='drr_nblkptr' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='344'>
+ <var-decl name='drr_pad' type-id='0f4ddd0b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='drr_maxblkid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='dmu_object_type_t' type-id='04b3b0b9' id='5c9d8906'/>
+ <enum-decl name='dmu_object_type' id='04b3b0b9'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='DMU_OT_NONE' value='0'/>
+ <enumerator name='DMU_OT_OBJECT_DIRECTORY' value='1'/>
+ <enumerator name='DMU_OT_OBJECT_ARRAY' value='2'/>
+ <enumerator name='DMU_OT_PACKED_NVLIST' value='3'/>
+ <enumerator name='DMU_OT_PACKED_NVLIST_SIZE' value='4'/>
+ <enumerator name='DMU_OT_BPOBJ' value='5'/>
+ <enumerator name='DMU_OT_BPOBJ_HDR' value='6'/>
+ <enumerator name='DMU_OT_SPACE_MAP_HEADER' value='7'/>
+ <enumerator name='DMU_OT_SPACE_MAP' value='8'/>
+ <enumerator name='DMU_OT_INTENT_LOG' value='9'/>
+ <enumerator name='DMU_OT_DNODE' value='10'/>
+ <enumerator name='DMU_OT_OBJSET' value='11'/>
+ <enumerator name='DMU_OT_DSL_DIR' value='12'/>
+ <enumerator name='DMU_OT_DSL_DIR_CHILD_MAP' value='13'/>
+ <enumerator name='DMU_OT_DSL_DS_SNAP_MAP' value='14'/>
+ <enumerator name='DMU_OT_DSL_PROPS' value='15'/>
+ <enumerator name='DMU_OT_DSL_DATASET' value='16'/>
+ <enumerator name='DMU_OT_ZNODE' value='17'/>
+ <enumerator name='DMU_OT_OLDACL' value='18'/>
+ <enumerator name='DMU_OT_PLAIN_FILE_CONTENTS' value='19'/>
+ <enumerator name='DMU_OT_DIRECTORY_CONTENTS' value='20'/>
+ <enumerator name='DMU_OT_MASTER_NODE' value='21'/>
+ <enumerator name='DMU_OT_UNLINKED_SET' value='22'/>
+ <enumerator name='DMU_OT_ZVOL' value='23'/>
+ <enumerator name='DMU_OT_ZVOL_PROP' value='24'/>
+ <enumerator name='DMU_OT_PLAIN_OTHER' value='25'/>
+ <enumerator name='DMU_OT_UINT64_OTHER' value='26'/>
+ <enumerator name='DMU_OT_ZAP_OTHER' value='27'/>
+ <enumerator name='DMU_OT_ERROR_LOG' value='28'/>
+ <enumerator name='DMU_OT_SPA_HISTORY' value='29'/>
+ <enumerator name='DMU_OT_SPA_HISTORY_OFFSETS' value='30'/>
+ <enumerator name='DMU_OT_POOL_PROPS' value='31'/>
+ <enumerator name='DMU_OT_DSL_PERMS' value='32'/>
+ <enumerator name='DMU_OT_ACL' value='33'/>
+ <enumerator name='DMU_OT_SYSACL' value='34'/>
+ <enumerator name='DMU_OT_FUID' value='35'/>
+ <enumerator name='DMU_OT_FUID_SIZE' value='36'/>
+ <enumerator name='DMU_OT_NEXT_CLONES' value='37'/>
+ <enumerator name='DMU_OT_SCAN_QUEUE' value='38'/>
+ <enumerator name='DMU_OT_USERGROUP_USED' value='39'/>
+ <enumerator name='DMU_OT_USERGROUP_QUOTA' value='40'/>
+ <enumerator name='DMU_OT_USERREFS' value='41'/>
+ <enumerator name='DMU_OT_DDT_ZAP' value='42'/>
+ <enumerator name='DMU_OT_DDT_STATS' value='43'/>
+ <enumerator name='DMU_OT_SA' value='44'/>
+ <enumerator name='DMU_OT_SA_MASTER_NODE' value='45'/>
+ <enumerator name='DMU_OT_SA_ATTR_REGISTRATION' value='46'/>
+ <enumerator name='DMU_OT_SA_ATTR_LAYOUTS' value='47'/>
+ <enumerator name='DMU_OT_SCAN_XLATE' value='48'/>
+ <enumerator name='DMU_OT_DEDUP' value='49'/>
+ <enumerator name='DMU_OT_DEADLIST' value='50'/>
+ <enumerator name='DMU_OT_DEADLIST_HDR' value='51'/>
+ <enumerator name='DMU_OT_DSL_CLONES' value='52'/>
+ <enumerator name='DMU_OT_BPOBJ_SUBOBJ' value='53'/>
+ <enumerator name='DMU_OT_NUMTYPES' value='54'/>
+ <enumerator name='DMU_OTN_UINT8_DATA' value='128'/>
+ <enumerator name='DMU_OTN_UINT8_METADATA' value='192'/>
+ <enumerator name='DMU_OTN_UINT16_DATA' value='129'/>
+ <enumerator name='DMU_OTN_UINT16_METADATA' value='193'/>
+ <enumerator name='DMU_OTN_UINT32_DATA' value='130'/>
+ <enumerator name='DMU_OTN_UINT32_METADATA' value='194'/>
+ <enumerator name='DMU_OTN_UINT64_DATA' value='131'/>
+ <enumerator name='DMU_OTN_UINT64_METADATA' value='195'/>
+ <enumerator name='DMU_OTN_ZAP_DATA' value='132'/>
+ <enumerator name='DMU_OTN_ZAP_METADATA' value='196'/>
+ <enumerator name='DMU_OTN_UINT8_ENC_DATA' value='160'/>
+ <enumerator name='DMU_OTN_UINT8_ENC_METADATA' value='224'/>
+ <enumerator name='DMU_OTN_UINT16_ENC_DATA' value='161'/>
+ <enumerator name='DMU_OTN_UINT16_ENC_METADATA' value='225'/>
+ <enumerator name='DMU_OTN_UINT32_ENC_DATA' value='162'/>
+ <enumerator name='DMU_OTN_UINT32_ENC_METADATA' value='226'/>
+ <enumerator name='DMU_OTN_UINT64_ENC_DATA' value='163'/>
+ <enumerator name='DMU_OTN_UINT64_ENC_METADATA' value='227'/>
+ <enumerator name='DMU_OTN_ZAP_ENC_DATA' value='164'/>
+ <enumerator name='DMU_OTN_ZAP_ENC_METADATA' value='228'/>
+ </enum-decl>
+ <class-decl name='drr_freeobjects' size-in-bits='192' is-struct='yes' visibility='default' id='a27d958e'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_firstobj' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_numobjs' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_write' size-in-bits='1088' is-struct='yes' visibility='default' id='4cc69e4b'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='96'>
+ <var-decl name='drr_pad' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_logical_size' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='328'>
+ <var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='336'>
+ <var-decl name='drr_compressiontype' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='344'>
+ <var-decl name='drr_pad2' type-id='0f4ddd0b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='drr_key' type-id='67f6d2cf' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='704'>
+ <var-decl name='drr_compressed_size' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='768'>
+ <var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='832'>
+ <var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='928'>
+ <var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <typedef-decl name='ddt_key_t' type-id='e0a4a1cb' id='67f6d2cf'/>
+ <class-decl name='ddt_key' size-in-bits='320' is-struct='yes' visibility='default' id='e0a4a1cb'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='ddk_cksum' type-id='39730d0b' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='ddk_prop' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_free' size-in-bits='256' is-struct='yes' visibility='default' id='c836cfd2'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_write_byref' size-in-bits='832' is-struct='yes' visibility='default' id='e511cdce'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_refguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_refobject' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='drr_refoffset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='drr_checksumtype' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='456'>
+ <var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='464'>
+ <var-decl name='drr_pad2' type-id='0f562bd0' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='drr_key' type-id='67f6d2cf' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_spill' size-in-bits='640' is-struct='yes' visibility='default' id='1e69a80a'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='200'>
+ <var-decl name='drr_compressiontype' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='208'>
+ <var-decl name='drr_pad' type-id='0f562bd0' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_compressed_size' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='608'>
+ <var-decl name='drr_type' type-id='5c9d8906' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_write_embedded' size-in-bits='384' is-struct='yes' visibility='default' id='98b1345e'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_compression' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='264'>
+ <var-decl name='drr_etype' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='272'>
+ <var-decl name='drr_pad' type-id='0f562bd0' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='drr_lsize' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='drr_psize' type-id='8f92235e' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_object_range' size-in-bits='512' is-struct='yes' visibility='default' id='aba1f9e1'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_firstobj' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_numslots' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_salt' type-id='13339fda' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='drr_iv' type-id='fa8ef949' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='drr_mac' type-id='fa9986a5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='drr_flags' type-id='b96825af' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='488'>
+ <var-decl name='drr_pad' type-id='d3490169' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_redact' size-in-bits='256' is-struct='yes' visibility='default' id='50389039'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_object' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='drr_offset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='drr_length' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='drr_toguid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <class-decl name='drr_checksum' size-in-bits='2432' is-struct='yes' visibility='default' id='a5fe3647'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='drr_pad' type-id='8c2bcad1' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='2176'>
+ <var-decl name='drr_checksum' type-id='39730d0b' visibility='default'/>
+ </data-member>
+ </class-decl>
+ <enum-decl name='lzc_send_flags' id='bfbd3c8e'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='LZC_SEND_FLAG_EMBED_DATA' value='1'/>
+ <enumerator name='LZC_SEND_FLAG_LARGE_BLOCK' value='2'/>
+ <enumerator name='LZC_SEND_FLAG_COMPRESS' value='4'/>
+ <enumerator name='LZC_SEND_FLAG_RAW' value='8'/>
+ <enumerator name='LZC_SEND_FLAG_SAVED' value='16'/>
+ </enum-decl>
+ <enum-decl name='lzc_dataset_type' id='bc9887f1'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='LZC_DATSET_TYPE_ZFS' value='2'/>
+ <enumerator name='LZC_DATSET_TYPE_ZVOL' value='3'/>
+ </enum-decl>
+ <pointer-type-def type-id='c19b74c3' size-in-bits='64' id='37e3bd22'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <qualified-type-def type-id='8b8fc893' const='yes' id='9623bc03'/>
+ <pointer-type-def type-id='9623bc03' size-in-bits='64' id='8341348b'/>
+ <qualified-type-def type-id='8e8d4be3' const='yes' id='693c3853'/>
+ <pointer-type-def type-id='693c3853' size-in-bits='64' id='22cce67b'/>
+ <pointer-type-def type-id='8e8d4be3' size-in-bits='64' id='5ce45b60'/>
+ <pointer-type-def type-id='5ce45b60' size-in-bits='64' id='857bb57e'/>
+ <pointer-type-def type-id='9c313c2d' size-in-bits='64' id='5d6479ae'/>
+ <pointer-type-def type-id='b96825af' size-in-bits='64' id='ae3e8ca6'/>
+ <function-decl name='lzc_get_bootenv' mangled-name='lzc_get_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bootenv'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='857bb57e' name='outnvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_32_nv' mangled-name='atomic_add_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32_nv'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-6' name='bits'/>
- <return type-id='type-id-7'/>
+ <function-decl name='lzc_set_bootenv' mangled-name='lzc_set_bootenv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_set_bootenv'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='22cce67b' name='env'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_16_nv' mangled-name='atomic_add_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16_nv'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-203' name='bits'/>
- <return type-id='type-id-197'/>
+ <function-decl name='lzc_wait_fs' mangled-name='lzc_wait_fs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_fs'>
+ <parameter type-id='80f4b756' name='fs'/>
+ <parameter type-id='3024501a' name='activity'/>
+ <parameter type-id='37e3bd22' name='waited'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_8_nv' mangled-name='atomic_add_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8_nv'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-206' name='bits'/>
- <return type-id='type-id-33'/>
+ <function-decl name='lzc_wait_tag' mangled-name='lzc_wait_tag' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait_tag'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='73446457' name='activity'/>
+ <parameter type-id='9c313c2d' name='tag'/>
+ <parameter type-id='37e3bd22' name='waited'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_ulong_nv' mangled-name='atomic_dec_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong_nv'>
- <parameter type-id='type-id-190' name='target'/>
- <return type-id='type-id-143'/>
+ <function-decl name='lzc_wait' mangled-name='lzc_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_wait'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='73446457' name='activity'/>
+ <parameter type-id='37e3bd22' name='waited'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_32_nv' mangled-name='atomic_dec_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32_nv'>
- <parameter type-id='type-id-194' name='target'/>
- <return type-id='type-id-7'/>
+ <function-decl name='lzc_redact' mangled-name='lzc_redact' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_redact'>
+ <parameter type-id='80f4b756' name='snapshot'/>
+ <parameter type-id='80f4b756' name='bookname'/>
+ <parameter type-id='5ce45b60' name='snapnv'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_16_nv' mangled-name='atomic_dec_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16_nv'>
- <parameter type-id='type-id-199' name='target'/>
- <return type-id='type-id-197'/>
+ <function-decl name='lzc_trim' mangled-name='lzc_trim' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_trim'>
+ <parameter type-id='80f4b756' name='poolname'/>
+ <parameter type-id='b1146b8d' name='cmd_type'/>
+ <parameter type-id='9c313c2d' name='rate'/>
+ <parameter type-id='c19b74c3' name='secure'/>
+ <parameter type-id='5ce45b60' name='vdevs'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_8_nv' mangled-name='atomic_dec_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8_nv'>
- <parameter type-id='type-id-201' name='target'/>
- <return type-id='type-id-33'/>
+ <function-decl name='lzc_initialize' mangled-name='lzc_initialize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_initialize'>
+ <parameter type-id='80f4b756' name='poolname'/>
+ <parameter type-id='7063e1ab' name='cmd_type'/>
+ <parameter type-id='5ce45b60' name='vdevs'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_ulong_nv' mangled-name='atomic_inc_ulong_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong_nv'>
- <parameter type-id='type-id-190' name='target'/>
- <return type-id='type-id-143'/>
+ <function-decl name='lzc_reopen' mangled-name='lzc_reopen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_reopen'>
+ <parameter type-id='80f4b756' name='pool_name'/>
+ <parameter type-id='c19b74c3' name='scrub_restart'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_32_nv' mangled-name='atomic_inc_32_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32_nv'>
- <parameter type-id='type-id-194' name='target'/>
- <return type-id='type-id-7'/>
+ <function-decl name='lzc_change_key' mangled-name='lzc_change_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_change_key'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='9c313c2d' name='crypt_cmd'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='ae3e8ca6' name='wkeydata'/>
+ <parameter type-id='3502e3ff' name='wkeylen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_16_nv' mangled-name='atomic_inc_16_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16_nv'>
- <parameter type-id='type-id-199' name='target'/>
- <return type-id='type-id-197'/>
+ <function-decl name='lzc_unload_key' mangled-name='lzc_unload_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_unload_key'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_8_nv' mangled-name='atomic_inc_8_nv' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8_nv'>
- <parameter type-id='type-id-201' name='target'/>
- <return type-id='type-id-33'/>
+ <function-decl name='lzc_load_key' mangled-name='lzc_load_key' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_load_key'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='c19b74c3' name='noop'/>
+ <parameter type-id='ae3e8ca6' name='wkeydata'/>
+ <parameter type-id='3502e3ff' name='wkeylen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_and_ulong' mangled-name='atomic_and_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_ulong'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-143' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_channel_program_nosync' mangled-name='lzc_channel_program_nosync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program_nosync'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='80f4b756' name='program'/>
+ <parameter type-id='9c313c2d' name='timeout'/>
+ <parameter type-id='9c313c2d' name='memlimit'/>
+ <parameter type-id='5ce45b60' name='argnvl'/>
+ <parameter type-id='857bb57e' name='outnvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_and_32' mangled-name='atomic_and_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_32'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_pool_checkpoint_discard' mangled-name='lzc_pool_checkpoint_discard' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint_discard'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_and_16' mangled-name='atomic_and_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_16'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-197' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_pool_checkpoint' mangled-name='lzc_pool_checkpoint' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_pool_checkpoint'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_and_8' mangled-name='atomic_and_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_and_8'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-33' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_channel_program' mangled-name='lzc_channel_program' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_channel_program'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='80f4b756' name='program'/>
+ <parameter type-id='9c313c2d' name='instrlimit'/>
+ <parameter type-id='9c313c2d' name='memlimit'/>
+ <parameter type-id='5ce45b60' name='argnvl'/>
+ <parameter type-id='857bb57e' name='outnvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_or_ulong' mangled-name='atomic_or_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_ulong'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-143' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_destroy_bookmarks' mangled-name='lzc_destroy_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_bookmarks'>
+ <parameter type-id='5ce45b60' name='bmarks'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_or_32' mangled-name='atomic_or_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_32'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-7' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_get_bookmark_props' mangled-name='lzc_get_bookmark_props' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmark_props'>
+ <parameter type-id='80f4b756' name='bookmark'/>
+ <parameter type-id='857bb57e' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_or_16' mangled-name='atomic_or_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_16'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-197' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_get_bookmarks' mangled-name='lzc_get_bookmarks' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_bookmarks'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='857bb57e' name='bmarks'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_or_8' mangled-name='atomic_or_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_or_8'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-33' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_bookmark' mangled-name='lzc_bookmark' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_bookmark'>
+ <parameter type-id='5ce45b60' name='bookmarks'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_sub_ptr' mangled-name='atomic_sub_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_ptr'>
- <parameter type-id='type-id-192' name='target'/>
- <parameter type-id='type-id-88' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_rollback_to' mangled-name='lzc_rollback_to' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback_to'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_sub_long' mangled-name='atomic_sub_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_long'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-86' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_rollback' mangled-name='lzc_rollback' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rollback'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='26a90f95' name='snapnamebuf'/>
+ <parameter type-id='95e97e5e' name='snapnamelen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_sub_32' mangled-name='atomic_sub_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_32'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-6' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_receive_with_cmdprops' mangled-name='lzc_receive_with_cmdprops' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_cmdprops'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='5ce45b60' name='cmdprops'/>
+ <parameter type-id='ae3e8ca6' name='wkeydata'/>
+ <parameter type-id='3502e3ff' name='wkeylen'/>
+ <parameter type-id='80f4b756' name='origin'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <parameter type-id='c19b74c3' name='resumable'/>
+ <parameter type-id='c19b74c3' name='raw'/>
+ <parameter type-id='95e97e5e' name='input_fd'/>
+ <parameter type-id='8341348b' name='begin_record'/>
+ <parameter type-id='95e97e5e' name='cleanup_fd'/>
+ <parameter type-id='5d6479ae' name='read_bytes'/>
+ <parameter type-id='5d6479ae' name='errflags'/>
+ <parameter type-id='5d6479ae' name='action_handle'/>
+ <parameter type-id='857bb57e' name='errors'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_sub_16' mangled-name='atomic_sub_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_16'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-203' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_receive_one' mangled-name='lzc_receive_one' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_one'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='80f4b756' name='origin'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <parameter type-id='c19b74c3' name='resumable'/>
+ <parameter type-id='c19b74c3' name='raw'/>
+ <parameter type-id='95e97e5e' name='input_fd'/>
+ <parameter type-id='8341348b' name='begin_record'/>
+ <parameter type-id='95e97e5e' name='cleanup_fd'/>
+ <parameter type-id='5d6479ae' name='read_bytes'/>
+ <parameter type-id='5d6479ae' name='errflags'/>
+ <parameter type-id='5d6479ae' name='action_handle'/>
+ <parameter type-id='857bb57e' name='errors'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_sub_8' mangled-name='atomic_sub_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_sub_8'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-206' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_receive_with_header' mangled-name='lzc_receive_with_header' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_with_header'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='80f4b756' name='origin'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <parameter type-id='c19b74c3' name='resumable'/>
+ <parameter type-id='c19b74c3' name='raw'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='8341348b' name='begin_record'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_ptr' mangled-name='atomic_add_ptr' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_ptr'>
- <parameter type-id='type-id-192' name='target'/>
- <parameter type-id='type-id-88' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_receive_resumable' mangled-name='lzc_receive_resumable' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive_resumable'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='80f4b756' name='origin'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <parameter type-id='c19b74c3' name='raw'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_long' mangled-name='atomic_add_long' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_long'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-86' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_receive' mangled-name='lzc_receive' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_receive'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='80f4b756' name='origin'/>
+ <parameter type-id='c19b74c3' name='force'/>
+ <parameter type-id='c19b74c3' name='raw'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_32' mangled-name='atomic_add_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_32'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-6' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_send_space' mangled-name='lzc_send_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='from'/>
+ <parameter type-id='bfbd3c8e' name='flags'/>
+ <parameter type-id='5d6479ae' name='spacep'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_16' mangled-name='atomic_add_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_16'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-203' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_send_space_resume_redacted' mangled-name='lzc_send_space_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_space_resume_redacted'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='from'/>
+ <parameter type-id='bfbd3c8e' name='flags'/>
+ <parameter type-id='9c313c2d' name='resumeobj'/>
+ <parameter type-id='9c313c2d' name='resumeoff'/>
+ <parameter type-id='9c313c2d' name='resume_bytes'/>
+ <parameter type-id='80f4b756' name='redactbook'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='5d6479ae' name='spacep'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_add_8' mangled-name='atomic_add_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_add_8'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-206' name='bits'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_send_resume_redacted' mangled-name='lzc_send_resume_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume_redacted'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='from'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='bfbd3c8e' name='flags'/>
+ <parameter type-id='9c313c2d' name='resumeobj'/>
+ <parameter type-id='9c313c2d' name='resumeoff'/>
+ <parameter type-id='80f4b756' name='redactbook'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_ulong' mangled-name='atomic_dec_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_ulong'>
- <parameter type-id='type-id-190' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_send_resume' mangled-name='lzc_send_resume' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_resume'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='from'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='bfbd3c8e' name='flags'/>
+ <parameter type-id='9c313c2d' name='resumeobj'/>
+ <parameter type-id='9c313c2d' name='resumeoff'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_32' mangled-name='atomic_dec_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_32'>
- <parameter type-id='type-id-194' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_send_redacted' mangled-name='lzc_send_redacted' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send_redacted'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='from'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='bfbd3c8e' name='flags'/>
+ <parameter type-id='80f4b756' name='redactbook'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_16' mangled-name='atomic_dec_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_16'>
- <parameter type-id='type-id-199' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_send' mangled-name='lzc_send' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_send'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='80f4b756' name='from'/>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='bfbd3c8e' name='flags'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_dec_8' mangled-name='atomic_dec_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_dec_8'>
- <parameter type-id='type-id-201' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_get_holds' mangled-name='lzc_get_holds' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_get_holds'>
+ <parameter type-id='80f4b756' name='snapname'/>
+ <parameter type-id='857bb57e' name='holdsp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_ulong' mangled-name='atomic_inc_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_ulong'>
- <parameter type-id='type-id-190' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_release' mangled-name='lzc_release' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_release'>
+ <parameter type-id='5ce45b60' name='holds'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_32' mangled-name='atomic_inc_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_32'>
- <parameter type-id='type-id-194' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_hold' mangled-name='lzc_hold' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_hold'>
+ <parameter type-id='5ce45b60' name='holds'/>
+ <parameter type-id='95e97e5e' name='cleanup_fd'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_16' mangled-name='atomic_inc_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_16'>
- <parameter type-id='type-id-199' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_sync' mangled-name='lzc_sync' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_sync'>
+ <parameter type-id='80f4b756' name='pool_name'/>
+ <parameter type-id='5ce45b60' name='innvl'/>
+ <parameter type-id='857bb57e' name='outnvl'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_inc_8' mangled-name='atomic_inc_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_inc_8'>
- <parameter type-id='type-id-201' name='target'/>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_exists' mangled-name='lzc_exists' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_exists'>
+ <parameter type-id='80f4b756' name='dataset'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='atomic_cas_8' mangled-name='atomic_cas_8' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_8'>
- <parameter type-id='type-id-201' name='target'/>
- <parameter type-id='type-id-33' name='exp'/>
- <parameter type-id='type-id-33' name='des'/>
- <return type-id='type-id-33'/>
+ <function-decl name='lzc_snaprange_space' mangled-name='lzc_snaprange_space' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snaprange_space'>
+ <parameter type-id='80f4b756' name='firstsnap'/>
+ <parameter type-id='80f4b756' name='lastsnap'/>
+ <parameter type-id='5d6479ae' name='usedp'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_cas_16' mangled-name='atomic_cas_16' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_16'>
- <parameter type-id='type-id-199' name='target'/>
- <parameter type-id='type-id-197' name='exp'/>
- <parameter type-id='type-id-197' name='des'/>
- <return type-id='type-id-197'/>
+ <function-decl name='lzc_destroy_snaps' mangled-name='lzc_destroy_snaps' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy_snaps'>
+ <parameter type-id='5ce45b60' name='snaps'/>
+ <parameter type-id='c19b74c3' name='defer'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_cas_32' mangled-name='atomic_cas_32' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_32'>
- <parameter type-id='type-id-194' name='target'/>
- <parameter type-id='type-id-7' name='exp'/>
- <parameter type-id='type-id-7' name='des'/>
- <return type-id='type-id-7'/>
+ <function-decl name='lzc_snapshot' mangled-name='lzc_snapshot' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_snapshot'>
+ <parameter type-id='5ce45b60' name='snaps'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='857bb57e' name='errlist'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='atomic_cas_ulong' mangled-name='atomic_cas_ulong' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='atomic_cas_ulong'>
- <parameter type-id='type-id-190' name='target'/>
- <parameter type-id='type-id-143' name='exp'/>
- <parameter type-id='type-id-143' name='des'/>
- <return type-id='type-id-143'/>
+ <function-decl name='lzc_destroy' mangled-name='lzc_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_destroy'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='getexecname.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='getexecname' mangled-name='getexecname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getexecname'>
- <return type-id='type-id-4'/>
+ <function-decl name='lzc_rename' mangled-name='lzc_rename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_rename'>
+ <parameter type-id='80f4b756' name='source'/>
+ <parameter type-id='80f4b756' name='target'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='getexecname_impl' mangled-name='getexecname_impl' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_promote' mangled-name='lzc_promote' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_promote'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='26a90f95' name='snapnamebuf'/>
+ <parameter type-id='95e97e5e' name='snapnamelen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/gethostid.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='get_system_hostid' mangled-name='get_system_hostid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='get_system_hostid'>
- <return type-id='type-id-12'/>
+ <function-decl name='lzc_clone' mangled-name='lzc_clone' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_clone'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='80f4b756' name='origin'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fopen' mangled-name='fopen64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='lzc_create' mangled-name='lzc_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzc_create'>
+ <parameter type-id='80f4b756' name='fsname'/>
+ <parameter type-id='bc9887f1' name='type'/>
+ <parameter type-id='5ce45b60' name='props'/>
+ <parameter type-id='ae3e8ca6' name='wkeydata'/>
+ <parameter type-id='3502e3ff' name='wkeylen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fscanf' mangled-name='fscanf' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='libzfs_core_fini' mangled-name='libzfs_core_fini' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_fini'>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='fclose' mangled-name='fclose' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='libzfs_core_init' mangled-name='libzfs_core_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='libzfs_core_init'>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/getmntany.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <class-decl name='extmnttab' size-in-bits='320' is-struct='yes' visibility='default' id='type-id-207'>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zutil_compat.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='32768' id='d16c6df4'>
+ <subrange length='4096' type-id='7359adad' id='bc1b5ddc'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='65536' id='163f6aa5'>
+ <subrange length='8192' type-id='7359adad' id='c88f397d'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='9c313c2d' size-in-bits='128' id='c1c22e6c'>
+ <subrange length='2' type-id='7359adad' id='52efc4ef'/>
+ </array-type-def>
+ <typedef-decl name='zfs_cmd_t' type-id='3522cd69' id='a5559cdd'/>
+ <class-decl name='zfs_cmd' size-in-bits='109952' is-struct='yes' visibility='default' id='3522cd69'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-36' visibility='default'/>
+ <var-decl name='zc_name' type-id='d16c6df4' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32768'>
+ <var-decl name='zc_nvlist_src' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32832'>
+ <var-decl name='zc_nvlist_src_size' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32896'>
+ <var-decl name='zc_nvlist_dst' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='mnt_major' type-id='type-id-35' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='32960'>
+ <var-decl name='zc_nvlist_dst_size' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='mnt_minor' type-id='type-id-35' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33024'>
+ <var-decl name='zc_nvlist_dst_filled' type-id='c19b74c3' visibility='default'/>
</data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-207' size-in-bits='64' id='type-id-208'/>
- <class-decl name='stat64' size-in-bits='1152' is-struct='yes' visibility='default' id='type-id-209'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='st_dev' type-id='type-id-210' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33056'>
+ <var-decl name='zc_pad2' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='st_ino' type-id='type-id-211' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33088'>
+ <var-decl name='zc_history' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='st_nlink' type-id='type-id-212' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='33152'>
+ <var-decl name='zc_value' type-id='163f6aa5' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='98688'>
+ <var-decl name='zc_string' type-id='d1617432' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100736'>
+ <var-decl name='zc_guid' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100800'>
+ <var-decl name='zc_nvlist_conf' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100864'>
+ <var-decl name='zc_nvlist_conf_size' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100928'>
+ <var-decl name='zc_cookie' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='100992'>
+ <var-decl name='zc_objset_type' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101056'>
+ <var-decl name='zc_perm_action' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101120'>
+ <var-decl name='zc_history_len' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101184'>
+ <var-decl name='zc_history_offset' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101248'>
+ <var-decl name='zc_obj' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101312'>
+ <var-decl name='zc_iflags' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='101376'>
+ <var-decl name='zc_share' type-id='ee5cec36' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='st_mode' type-id='type-id-213' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='101632'>
+ <var-decl name='zc_objset_stats' type-id='b2c14f17' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='224'>
- <var-decl name='st_uid' type-id='type-id-214' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='103936'>
+ <var-decl name='zc_begin_record' type-id='09fcdc01' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='st_gid' type-id='type-id-215' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='106368'>
+ <var-decl name='zc_inject_record' type-id='a4301ca6' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='__pad0' type-id='type-id-1' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109184'>
+ <var-decl name='zc_defer_destroy' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='st_rdev' type-id='type-id-210' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109216'>
+ <var-decl name='zc_flags' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='st_size' type-id='type-id-216' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109248'>
+ <var-decl name='zc_action_handle' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='st_blksize' type-id='type-id-217' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109312'>
+ <var-decl name='zc_cleanup_fd' type-id='95e97e5e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='st_blocks' type-id='type-id-218' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109344'>
+ <var-decl name='zc_simple' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='st_atim' type-id='type-id-219' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109352'>
+ <var-decl name='zc_pad' type-id='d3490169' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='st_mtim' type-id='type-id-219' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109376'>
+ <var-decl name='zc_sendobj' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='st_ctim' type-id='type-id-219' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109440'>
+ <var-decl name='zc_fromobj' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='__glibc_reserved' type-id='type-id-220' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109504'>
+ <var-decl name='zc_createtxg' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
- <typedef-decl name='__dev_t' type-id='type-id-12' id='type-id-210'/>
- <typedef-decl name='__ino64_t' type-id='type-id-12' id='type-id-211'/>
- <typedef-decl name='__nlink_t' type-id='type-id-12' id='type-id-212'/>
- <typedef-decl name='__mode_t' type-id='type-id-10' id='type-id-213'/>
- <typedef-decl name='__uid_t' type-id='type-id-10' id='type-id-214'/>
- <typedef-decl name='__gid_t' type-id='type-id-10' id='type-id-215'/>
- <typedef-decl name='__off_t' type-id='type-id-86' id='type-id-216'/>
- <typedef-decl name='__blksize_t' type-id='type-id-86' id='type-id-217'/>
- <typedef-decl name='__blkcnt64_t' type-id='type-id-86' id='type-id-218'/>
- <class-decl name='timespec' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-219'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='tv_sec' type-id='type-id-221' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109568'>
+ <var-decl name='zc_stat' type-id='0371a9c7' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='tv_nsec' type-id='type-id-222' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='109888'>
+ <var-decl name='zc_zoneid' type-id='9c313c2d' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='__time_t' type-id='type-id-86' id='type-id-221'/>
- <typedef-decl name='__syscall_slong_t' type-id='type-id-86' id='type-id-222'/>
-
- <array-type-def dimensions='1' type-id='type-id-222' size-in-bits='192' id='type-id-220'>
- <subrange length='3' type-id='type-id-12' id='type-id-75'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-209' size-in-bits='64' id='type-id-223'/>
- <function-decl name='getextmntent' mangled-name='getextmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getextmntent'>
- <parameter type-id='type-id-4' name='path'/>
- <parameter type-id='type-id-208' name='entry'/>
- <parameter type-id='type-id-223' name='statbuf'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-224'>
+ <typedef-decl name='zfs_share_t' type-id='feb6f2da' id='ee5cec36'/>
+ <class-decl name='zfs_share' size-in-bits='256' is-struct='yes' visibility='default' id='feb6f2da'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-1' visibility='default'/>
+ <var-decl name='z_exportdata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-36' visibility='default'/>
+ <var-decl name='z_sharedata' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-36' visibility='default'/>
+ <var-decl name='z_sharetype' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-36' visibility='default'/>
+ <var-decl name='z_sharemax' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-36' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-36' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-36' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='dmu_objset_stats_t' type-id='098f0221' id='b2c14f17'/>
+ <class-decl name='dmu_objset_stats' size-in-bits='2304' is-struct='yes' visibility='default' id='098f0221'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='dds_num_clones' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='dds_creation_txg' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='dds_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='dds_type' type-id='230f1e16' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='224'>
+ <var-decl name='dds_is_snapshot' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-36' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='232'>
+ <var-decl name='dds_inconsistent' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-225' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='240'>
+ <var-decl name='dds_redacted' type-id='b96825af' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-226' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='248'>
+ <var-decl name='dds_origin' type-id='d1617432' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-1' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='zinject_record_t' type-id='3216f820' id='a4301ca6'/>
+ <class-decl name='zinject_record' size-in-bits='2816' is-struct='yes' visibility='default' id='3216f820'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='zi_objset' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-1' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='64'>
+ <var-decl name='zi_object' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-216' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='128'>
+ <var-decl name='zi_start' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-195' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='zi_end' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-204' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='256'>
+ <var-decl name='zi_guid' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-227' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='zi_level' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-228' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='zi_error' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__pad1' type-id='type-id-89' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='zi_type' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__pad2' type-id='type-id-89' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='zi_freq' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='__pad3' type-id='type-id-89' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='480'>
+ <var-decl name='zi_failfast' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='__pad4' type-id='type-id-89' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='512'>
+ <var-decl name='zi_func' type-id='d1617432' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-85' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2560'>
+ <var-decl name='zi_iotype' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-1' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2592'>
+ <var-decl name='zi_duration' type-id='3ff5601b' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-229' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2624'>
+ <var-decl name='zi_timer' type-id='9c313c2d' visibility='default'/>
</data-member>
- </class-decl>
- <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-230'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_next' type-id='type-id-225' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2688'>
+ <var-decl name='zi_nlanes' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_sbuf' type-id='type-id-226' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2752'>
+ <var-decl name='zi_cmd' type-id='8f92235e' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_pos' type-id='type-id-1' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='2784'>
+ <var-decl name='zi_dvas' type-id='8f92235e' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-230' size-in-bits='64' id='type-id-225'/>
- <pointer-type-def type-id='type-id-224' size-in-bits='64' id='type-id-226'/>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='8' id='type-id-227'>
- <subrange length='1' type-id='type-id-12' id='type-id-231'/>
-
- </array-type-def>
- <typedef-decl name='__off64_t' type-id='type-id-86' id='type-id-228'/>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='160' id='type-id-229'>
- <subrange length='20' type-id='type-id-12' id='type-id-232'/>
-
- </array-type-def>
- <typedef-decl name='FILE' type-id='type-id-224' id='type-id-233'/>
- <pointer-type-def type-id='type-id-233' size-in-bits='64' id='type-id-234'/>
- <class-decl name='mnttab' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-235'>
+ <typedef-decl name='zfs_stat_t' type-id='6417f0b9' id='0371a9c7'/>
+ <class-decl name='zfs_stat' size-in-bits='320' is-struct='yes' visibility='default' id='6417f0b9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='mnt_special' type-id='type-id-36' visibility='default'/>
+ <var-decl name='zs_gen' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='mnt_mountp' type-id='type-id-36' visibility='default'/>
+ <var-decl name='zs_mode' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='mnt_fstype' type-id='type-id-36' visibility='default'/>
+ <var-decl name='zs_links' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='mnt_mntopts' type-id='type-id-36' visibility='default'/>
+ <var-decl name='zs_ctime' type-id='c1c22e6c' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-235' size-in-bits='64' id='type-id-236'/>
- <function-decl name='getmntany' mangled-name='getmntany' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getmntany'>
- <parameter type-id='type-id-234' name='fp'/>
- <parameter type-id='type-id-236' name='mgetp'/>
- <parameter type-id='type-id-236' name='mrefp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='_sol_getmntent' mangled-name='_sol_getmntent' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='_sol_getmntent'>
- <parameter type-id='type-id-234' name='fp'/>
- <parameter type-id='type-id-236' name='mgetp'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__builtin_fwrite' mangled-name='fwrite' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='feof' mangled-name='feof' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='getmntent_r' mangled-name='getmntent_r' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <pointer-type-def type-id='a5559cdd' size-in-bits='64' id='e4ec4540'/>
+ <function-decl name='zfs_ioctl_fd' mangled-name='zfs_ioctl_fd' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_ioctl_fd'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='7359adad' name='request'/>
+ <parameter type-id='e4ec4540' name='zc'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='list.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <class-decl name='list' size-in-bits='256' is-struct='yes' visibility='default' id='type-id-237'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='list_size' type-id='type-id-85' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='list_offset' type-id='type-id-85' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='list_head' type-id='type-id-238' visibility='default'/>
- </data-member>
- </class-decl>
- <class-decl name='list_node' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-238'>
- <data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='next' type-id='type-id-239' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='prev' type-id='type-id-239' visibility='default'/>
- </data-member>
- </class-decl>
- <pointer-type-def type-id='type-id-238' size-in-bits='64' id='type-id-239'/>
- <typedef-decl name='list_t' type-id='type-id-237' id='type-id-240'/>
- <pointer-type-def type-id='type-id-240' size-in-bits='64' id='type-id-241'/>
- <function-decl name='list_is_empty' mangled-name='list_is_empty' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_is_empty'>
- <parameter type-id='type-id-241' name='list'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <typedef-decl name='list_node_t' type-id='type-id-238' id='type-id-242'/>
- <pointer-type-def type-id='type-id-242' size-in-bits='64' id='type-id-243'/>
- <function-decl name='list_link_active' mangled-name='list_link_active' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_active'>
- <parameter type-id='type-id-243' name='ln'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='list_link_init' mangled-name='list_link_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_init'>
- <parameter type-id='type-id-243' name='ln'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='list_link_replace' mangled-name='list_link_replace' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_link_replace'>
- <parameter type-id='type-id-243' name='lold'/>
- <parameter type-id='type-id-243' name='lnew'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='list_move_tail' mangled-name='list_move_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_move_tail'>
- <parameter type-id='type-id-241' name='dst'/>
- <parameter type-id='type-id-241' name='src'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='list_prev' mangled-name='list_prev' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_prev'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-89' name='object'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='list_next' mangled-name='list_next' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_next'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-89' name='object'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='list_tail' mangled-name='list_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_tail'>
- <parameter type-id='type-id-241' name='list'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='list_head' mangled-name='list_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_head'>
- <parameter type-id='type-id-241' name='list'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='list_remove_tail' mangled-name='list_remove_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_tail'>
- <parameter type-id='type-id-241' name='list'/>
- <return type-id='type-id-89'/>
- </function-decl>
- <function-decl name='list_remove_head' mangled-name='list_remove_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove_head'>
- <parameter type-id='type-id-241' name='list'/>
- <return type-id='type-id-89'/>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zutil_device_path_os.c' language='LANG_C99'>
+ <function-decl name='is_mpath_whole_disk' mangled-name='is_mpath_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='is_mpath_whole_disk'>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='list_remove' mangled-name='list_remove' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_remove'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-89' name='object'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_get_underlying_path' mangled-name='zfs_get_underlying_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_underlying_path'>
+ <parameter type-id='80f4b756' name='dev_name'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='list_insert_tail' mangled-name='list_insert_tail' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_tail'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-89' name='object'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_dev_is_whole_disk' mangled-name='zfs_dev_is_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_whole_disk'>
+ <parameter type-id='80f4b756' name='dev_name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='list_insert_head' mangled-name='list_insert_head' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_head'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-89' name='object'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_dev_is_dm' mangled-name='zfs_dev_is_dm' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_is_dm'>
+ <parameter type-id='80f4b756' name='dev_name'/>
+ <return type-id='c19b74c3'/>
</function-decl>
- <function-decl name='list_insert_before' mangled-name='list_insert_before' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_before'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-89' name='object'/>
- <parameter type-id='type-id-89' name='nobject'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_get_enclosure_sysfs_path' mangled-name='zfs_get_enclosure_sysfs_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_get_enclosure_sysfs_path'>
+ <parameter type-id='80f4b756' name='dev_name'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='list_insert_after' mangled-name='list_insert_after' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_insert_after'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-89' name='object'/>
- <parameter type-id='type-id-89' name='nobject'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_strip_path' mangled-name='zfs_strip_path' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_path'>
+ <parameter type-id='26a90f95' name='path'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='list_destroy' mangled-name='list_destroy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_destroy'>
- <parameter type-id='type-id-241' name='list'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_strip_partition' mangled-name='zfs_strip_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strip_partition'>
+ <parameter type-id='26a90f95' name='path'/>
+ <return type-id='26a90f95'/>
</function-decl>
- <function-decl name='list_create' mangled-name='list_create' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='list_create'>
- <parameter type-id='type-id-241' name='list'/>
- <parameter type-id='type-id-85' name='size'/>
- <parameter type-id='type-id-85' name='offset'/>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_append_partition' mangled-name='zfs_append_partition' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_append_partition'>
+ <parameter type-id='26a90f95' name='path'/>
+ <parameter type-id='b59d7dce' name='max_len'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='mkdirp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='mode_t' type-id='type-id-213' id='type-id-244'/>
- <function-decl name='mkdirp' mangled-name='mkdirp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='mkdirp'>
- <parameter type-id='type-id-4' name='d'/>
- <parameter type-id='type-id-244' name='mode'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='__mbstowcs_alias' mangled-name='mbstowcs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='__wcstombs_alias' mangled-name='wcstombs' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <abi-instr version='1.0' address-size='64' path='os/linux/zutil_import_os.c' language='LANG_C99'>
+ <class-decl name='udev_device' is-struct='yes' visibility='default' is-declaration-only='yes' id='640b33ca'/>
+ <qualified-type-def type-id='80f4b756' const='yes' id='b99c00c9'/>
+ <pointer-type-def type-id='b99c00c9' size-in-bits='64' id='13956559'/>
+ <pointer-type-def type-id='b59d7dce' size-in-bits='64' id='78c01427'/>
+ <pointer-type-def type-id='640b33ca' size-in-bits='64' id='b32bae08'/>
+ <function-decl name='update_vdev_config_dev_strs' mangled-name='update_vdev_config_dev_strs' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='update_vdev_config_dev_strs'>
+ <parameter type-id='5ce45b60' name='nv'/>
+ <return type-id='48b5725f'/>
</function-decl>
- <function-decl name='mkdir' mangled-name='mkdir' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='zpool_label_disk_wait' mangled-name='zpool_label_disk_wait' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_label_disk_wait'>
+ <parameter type-id='80f4b756' name='path'/>
+ <parameter type-id='95e97e5e' name='timeout_ms'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='page.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='spl_pagesize' mangled-name='spl_pagesize' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='spl_pagesize'>
- <return type-id='type-id-85'/>
+ <function-decl name='zfs_device_get_devid' mangled-name='zfs_device_get_devid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_devid'>
+ <parameter type-id='b32bae08' name='dev'/>
+ <parameter type-id='26a90f95' name='bufptr'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcat.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='strlcat' mangled-name='strlcat' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcat'>
- <parameter type-id='type-id-36' name='dst'/>
- <parameter type-id='type-id-4' name='src'/>
- <parameter type-id='type-id-85' name='dstsize'/>
- <return type-id='type-id-85'/>
+ <function-decl name='zpool_default_search_paths' mangled-name='zpool_default_search_paths' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_default_search_paths'>
+ <parameter type-id='78c01427' name='count'/>
+ <return type-id='13956559'/>
</function-decl>
- <function-decl name='__builtin_memcpy' mangled-name='memcpy' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_dev_flush' mangled-name='zfs_dev_flush' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dev_flush'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='strlcpy.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='strlcpy' mangled-name='strlcpy' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='strlcpy'>
- <parameter type-id='type-id-36' name='dst'/>
- <parameter type-id='type-id-4' name='src'/>
- <parameter type-id='type-id-85' name='len'/>
- <return type-id='type-id-85'/>
+ <function-decl name='zfs_device_get_physical' mangled-name='zfs_device_get_physical' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_device_get_physical'>
+ <parameter type-id='b32bae08' name='dev'/>
+ <parameter type-id='26a90f95' name='bufptr'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='timestamp.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <function-decl name='print_timestamp' mangled-name='print_timestamp' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='print_timestamp'>
- <parameter type-id='type-id-35' name='timestamp_fmt'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='localtime' mangled-name='localtime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='strftime' mangled-name='strftime' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <abi-instr version='1.0' address-size='64' path='zutil_device_path.c' language='LANG_C99'>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <typedef-decl name='ssize_t' type-id='41060289' id='79a0948f'/>
+ <typedef-decl name='__ssize_t' type-id='bd54fe1a' id='41060289'/>
+ <function-decl name='zfs_strcmp_pathname' mangled-name='zfs_strcmp_pathname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_strcmp_pathname'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='80f4b756' name='cmp'/>
+ <parameter type-id='95e97e5e' name='wholedisk'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='time' mangled-name='time' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_resolve_shortname' mangled-name='zfs_resolve_shortname' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_resolve_shortname'>
+ <parameter type-id='80f4b756' name='name'/>
+ <parameter type-id='26a90f95' name='path'/>
+ <parameter type-id='b59d7dce' name='len'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='nl_langinfo' mangled-name='nl_langinfo' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='zfs_dirnamelen' mangled-name='zfs_dirnamelen' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_dirnamelen'>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='79a0948f'/>
</function-decl>
- </abi-instr>
- <abi-instr version='1.0' address-size='64' path='os/linux/zone.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libspl' language='LANG_C99'>
- <typedef-decl name='zoneid_t' type-id='type-id-1' id='type-id-245'/>
- <function-decl name='getzoneid' mangled-name='getzoneid' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='getzoneid'>
- <return type-id='type-id-245'/>
+ <function-decl name='zfs_basename' mangled-name='zfs_basename' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_basename'>
+ <parameter type-id='80f4b756' name='path'/>
+ <return type-id='80f4b756'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='rdwr_efi.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libefi' language='LANG_C99'>
- <var-decl name='efi_debug' type-id='type-id-1' mangled-name='efi_debug' visibility='default' elf-symbol-id='efi_debug'/>
- <class-decl name='dk_gpt' size-in-bits='1920' is-struct='yes' visibility='default' id='type-id-246'>
+ <abi-instr version='1.0' address-size='64' path='zutil_import.c' language='LANG_C99'>
+ <typedef-decl name='importargs_t' type-id='7ac83801' id='7a842a6b'/>
+ <class-decl name='importargs' size-in-bits='448' is-struct='yes' visibility='default' id='7ac83801'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='efi_version' type-id='type-id-35' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='efi_nparts' type-id='type-id-35' visibility='default'/>
+ <var-decl name='path' type-id='9b23c9ad' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='efi_part_size' type-id='type-id-35' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='96'>
- <var-decl name='efi_lbasize' type-id='type-id-35' visibility='default'/>
+ <var-decl name='paths' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='efi_last_lba' type-id='type-id-247' visibility='default'/>
+ <var-decl name='poolname' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='efi_first_u_lba' type-id='type-id-247' visibility='default'/>
+ <var-decl name='guid' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='efi_last_u_lba' type-id='type-id-247' visibility='default'/>
+ <var-decl name='cachefile' type-id='80f4b756' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='efi_disk_uguid' type-id='type-id-248' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='efi_flags' type-id='type-id-35' visibility='default'/>
+ <var-decl name='can_be_active' type-id='c19b74c3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='480'>
- <var-decl name='efi_reserved1' type-id='type-id-35' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='efi_altern_lba' type-id='type-id-247' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='efi_reserved' type-id='type-id-249' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='352'>
+ <var-decl name='scan' type-id='c19b74c3' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='efi_parts' type-id='type-id-250' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='policy' type-id='5ce45b60' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='longlong_t' type-id='type-id-172' id='type-id-251'/>
- <typedef-decl name='diskaddr_t' type-id='type-id-251' id='type-id-247'/>
- <class-decl name='uuid' size-in-bits='128' is-struct='yes' visibility='default' id='type-id-248'>
+ <typedef-decl name='pool_config_ops_t' type-id='1a21babe' id='b1e62775'/>
+ <class-decl name='pool_config_ops' size-in-bits='128' is-struct='yes' visibility='default' id='8b092c69'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='time_low' type-id='type-id-7' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='32'>
- <var-decl name='time_mid' type-id='type-id-197' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='48'>
- <var-decl name='time_hi_and_version' type-id='type-id-197' visibility='default'/>
+ <var-decl name='pco_refresh_config' type-id='e7c00489' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='clock_seq_hi_and_reserved' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='72'>
- <var-decl name='clock_seq_low' type-id='type-id-33' visibility='default'/>
- </data-member>
- <data-member access='public' layout-offset-in-bits='80'>
- <var-decl name='node_addr' type-id='type-id-72' visibility='default'/>
+ <var-decl name='pco_pool_active' type-id='9eadf5e0' visibility='default'/>
</data-member>
</class-decl>
-
- <array-type-def dimensions='1' type-id='type-id-35' size-in-bits='384' id='type-id-249'>
- <subrange length='12' type-id='type-id-12' id='type-id-70'/>
-
+ <typedef-decl name='refresh_config_func_t' type-id='29f040d2' id='b7c58eaa'/>
+ <typedef-decl name='pool_active_func_t' type-id='baa42fef' id='de5d1d8f'/>
+ <pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
+ <qualified-type-def type-id='8b092c69' const='yes' id='1a21babe'/>
+ <pointer-type-def type-id='7a842a6b' size-in-bits='64' id='07ee4a58'/>
+ <pointer-type-def type-id='95e97e5e' size-in-bits='64' id='7292109c'/>
+ <pointer-type-def type-id='de5d1d8f' size-in-bits='64' id='9eadf5e0'/>
+ <pointer-type-def type-id='b1e62775' size-in-bits='64' id='f095e320'/>
+ <pointer-type-def type-id='b7c58eaa' size-in-bits='64' id='e7c00489'/>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <function-decl name='zpool_find_config' mangled-name='zpool_find_config' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_find_config'>
+ <parameter type-id='eaa32e2f' name='hdl'/>
+ <parameter type-id='80f4b756' name='target'/>
+ <parameter type-id='857bb57e' name='configp'/>
+ <parameter type-id='07ee4a58' name='args'/>
+ <parameter type-id='f095e320' name='pco'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-decl name='zpool_search_import' mangled-name='zpool_search_import' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_search_import'>
+ <parameter type-id='eaa32e2f' name='hdl'/>
+ <parameter type-id='07ee4a58' name='import'/>
+ <parameter type-id='f095e320' name='pco'/>
+ <return type-id='5ce45b60'/>
+ </function-decl>
+ <function-decl name='zpool_read_label' mangled-name='zpool_read_label' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_read_label'>
+ <parameter type-id='95e97e5e' name='fd'/>
+ <parameter type-id='857bb57e' name='config'/>
+ <parameter type-id='7292109c' name='num_labels'/>
+ <return type-id='95e97e5e'/>
+ </function-decl>
+ <function-type size-in-bits='64' id='baa42fef'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='80f4b756'/>
+ <parameter type-id='9c313c2d'/>
+ <parameter type-id='37e3bd22'/>
+ <return type-id='95e97e5e'/>
+ </function-type>
+ <function-type size-in-bits='64' id='29f040d2'>
+ <parameter type-id='eaa32e2f'/>
+ <parameter type-id='5ce45b60'/>
+ <return type-id='5ce45b60'/>
+ </function-type>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='zutil_nicenum.c' language='LANG_C99'>
+ <enum-decl name='zfs_nicenum_format' id='29cf1969'>
+ <underlying-type type-id='9cac1fee'/>
+ <enumerator name='ZFS_NICENUM_1024' value='0'/>
+ <enumerator name='ZFS_NICENUM_BYTES' value='1'/>
+ <enumerator name='ZFS_NICENUM_TIME' value='2'/>
+ <enumerator name='ZFS_NICENUM_RAW' value='3'/>
+ <enumerator name='ZFS_NICENUM_RAWTIME' value='4'/>
+ </enum-decl>
+ <function-decl name='zfs_nicebytes' mangled-name='zfs_nicebytes' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicebytes'>
+ <parameter type-id='9c313c2d' name='num'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_niceraw' mangled-name='zfs_niceraw' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_niceraw'>
+ <parameter type-id='9c313c2d' name='num'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_nicetime' mangled-name='zfs_nicetime' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicetime'>
+ <parameter type-id='9c313c2d' name='num'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_nicenum' mangled-name='zfs_nicenum' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum'>
+ <parameter type-id='9c313c2d' name='num'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_nicenum_format' mangled-name='zfs_nicenum_format' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_nicenum_format'>
+ <parameter type-id='9c313c2d' name='num'/>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='b59d7dce' name='buflen'/>
+ <parameter type-id='29cf1969' name='format'/>
+ <return type-id='48b5725f'/>
+ </function-decl>
+ <function-decl name='zfs_isnumber' mangled-name='zfs_isnumber' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zfs_isnumber'>
+ <parameter type-id='80f4b756' name='str'/>
+ <return type-id='c19b74c3'/>
+ </function-decl>
+ </abi-instr>
+ <abi-instr version='1.0' address-size='64' path='zutil_pool.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='853fd5dc' size-in-bits='32768' id='b505fc2f'>
+ <subrange length='64' type-id='7359adad' id='b10be967'/>
</array-type-def>
- <class-decl name='dk_part' size-in-bits='960' is-struct='yes' visibility='default' id='type-id-252'>
+ <typedef-decl name='ddt_stat_t' type-id='65242dfe' id='853fd5dc'/>
+ <class-decl name='ddt_stat' size-in-bits='512' is-struct='yes' visibility='default' id='65242dfe'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='p_start' type-id='type-id-247' visibility='default'/>
+ <var-decl name='dds_blocks' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='p_size' type-id='type-id-247' visibility='default'/>
+ <var-decl name='dds_lsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='p_guid' type-id='type-id-248' visibility='default'/>
+ <var-decl name='dds_psize' type-id='9c313c2d' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='192'>
+ <var-decl name='dds_dsize' type-id='9c313c2d' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='p_tag' type-id='type-id-253' visibility='default'/>
+ <var-decl name='dds_ref_blocks' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='272'>
- <var-decl name='p_flag' type-id='type-id-253' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='320'>
+ <var-decl name='dds_ref_lsize' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='288'>
- <var-decl name='p_name' type-id='type-id-254' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='384'>
+ <var-decl name='dds_ref_psize' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='p_uguid' type-id='type-id-248' visibility='default'/>
+ <data-member access='public' layout-offset-in-bits='448'>
+ <var-decl name='dds_ref_dsize' type-id='9c313c2d' visibility='default'/>
</data-member>
- <data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='p_resv' type-id='type-id-255' visibility='default'/>
+ </class-decl>
+ <typedef-decl name='ddt_histogram_t' type-id='bc2b3086' id='2d7fe832'/>
+ <class-decl name='ddt_histogram' size-in-bits='32768' is-struct='yes' visibility='default' id='bc2b3086'>
+ <data-member access='public' layout-offset-in-bits='0'>
+ <var-decl name='ddh_stat' type-id='b505fc2f' visibility='default'/>
</data-member>
</class-decl>
- <typedef-decl name='ushort_t' type-id='type-id-195' id='type-id-253'/>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='288' id='type-id-254'>
- <subrange length='36' type-id='type-id-12' id='type-id-256'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-35' size-in-bits='256' id='type-id-255'>
- <subrange length='8' type-id='type-id-12' id='type-id-69'/>
-
- </array-type-def>
-
- <array-type-def dimensions='1' type-id='type-id-252' size-in-bits='960' id='type-id-250'>
- <subrange length='1' type-id='type-id-12' id='type-id-231'/>
-
- </array-type-def>
- <pointer-type-def type-id='type-id-246' size-in-bits='64' id='type-id-257'/>
- <function-decl name='efi_err_check' mangled-name='efi_err_check' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_err_check'>
- <parameter type-id='type-id-257' name='vtoc'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='efi_type' mangled-name='efi_type' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_type'>
- <parameter type-id='type-id-1' name='fd'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='efi_free' mangled-name='efi_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_free'>
- <parameter type-id='type-id-257' name='ptr'/>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='efi_write' mangled-name='efi_write' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_write'>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-257' name='vtoc'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='efi_use_whole_disk' mangled-name='efi_use_whole_disk' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_use_whole_disk'>
- <parameter type-id='type-id-1' name='fd'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='efi_rescan' mangled-name='efi_rescan' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_rescan'>
- <parameter type-id='type-id-1' name='fd'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <pointer-type-def type-id='type-id-257' size-in-bits='64' id='type-id-258'/>
- <function-decl name='efi_alloc_and_read' mangled-name='efi_alloc_and_read' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_read'>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-258' name='vtoc'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='efi_alloc_and_init' mangled-name='efi_alloc_and_init' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='efi_alloc_and_init'>
- <parameter type-id='type-id-1' name='fd'/>
- <parameter type-id='type-id-7' name='nparts'/>
- <parameter type-id='type-id-258' name='vtoc'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='uuid_is_null' mangled-name='uuid_is_null' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='uuid_generate' mangled-name='uuid_generate' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='bcmp' mangled-name='bcmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='lseek' mangled-name='lseek64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='write' mangled-name='write' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
- </function-decl>
- <function-decl name='fsync' mangled-name='fsync' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <qualified-type-def type-id='2d7fe832' const='yes' id='ec92d602'/>
+ <pointer-type-def type-id='ec92d602' size-in-bits='64' id='932720f8'/>
+ <qualified-type-def type-id='853fd5dc' const='yes' id='764c298c'/>
+ <pointer-type-def type-id='764c298c' size-in-bits='64' id='dfe59052'/>
+ <pointer-type-def type-id='857bb57e' size-in-bits='64' id='75be733c'/>
+ <pointer-type-def type-id='3502e3ff' size-in-bits='64' id='4dd26a40'/>
+ <function-decl name='zpool_history_unpack' mangled-name='zpool_history_unpack' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_history_unpack'>
+ <parameter type-id='26a90f95' name='buf'/>
+ <parameter type-id='9c313c2d' name='bytes_read'/>
+ <parameter type-id='5d6479ae' name='leftover'/>
+ <parameter type-id='75be733c' name='records'/>
+ <parameter type-id='4dd26a40' name='numrecords'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='crc32' mangled-name='crc32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-84'/>
+ <function-decl name='zpool_dump_ddt' mangled-name='zpool_dump_ddt' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='zpool_dump_ddt'>
+ <parameter type-id='dfe59052' name='dds_total'/>
+ <parameter type-id='932720f8' name='ddh'/>
+ <return type-id='48b5725f'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi b/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi
index 04bce74d8c00..f5693e9caaca 100644
--- a/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi
+++ b/sys/contrib/openzfs/lib/libzfsbootenv/libzfsbootenv.abi
@@ -1,361 +1,206 @@
<abi-corpus architecture='elf-amd-x86_64' soname='libzfsbootenv.so.1'>
<elf-needed>
<dependency name='libzfs.so.4'/>
<dependency name='libnvpair.so.3'/>
<dependency name='libc.so.6'/>
</elf-needed>
<elf-function-symbols>
<elf-symbol name='_fini' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='_init' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_add_pair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_bootenv_print' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_get_boot_device' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_nvlist_free' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_nvlist_get' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_nvlist_set' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_remove_pair' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
<elf-symbol name='lzbe_set_boot_device' type='func-type' binding='global-binding' visibility='default-visibility' is-defined='yes'/>
</elf-function-symbols>
- <abi-instr version='1.0' address-size='64' path='lzbe_device.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfsbootenv' language='LANG_C99'>
- <type-decl name='int' size-in-bits='32' id='type-id-1'/>
- <type-decl name='char' size-in-bits='8' id='type-id-2'/>
- <qualified-type-def type-id='type-id-2' const='yes' id='type-id-3'/>
- <pointer-type-def type-id='type-id-3' size-in-bits='64' id='type-id-4'/>
- <pointer-type-def type-id='type-id-2' size-in-bits='64' id='type-id-5'/>
- <pointer-type-def type-id='type-id-5' size-in-bits='64' id='type-id-6'/>
- <function-decl name='lzbe_get_boot_device' mangled-name='lzbe_get_boot_device' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_get_boot_device'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-6' name='device'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='unnamed-enum-underlying-type' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='type-id-7'/>
- <enum-decl name='lzbe_flags' id='type-id-8'>
- <underlying-type type-id='type-id-7'/>
+ <abi-instr version='1.0' address-size='64' path='lzbe_device.c' language='LANG_C99'>
+ <type-decl name='char' size-in-bits='8' id='a84c031d'/>
+ <type-decl name='int' size-in-bits='32' id='95e97e5e'/>
+ <type-decl name='unnamed-enum-underlying-type-32' is-anonymous='yes' size-in-bits='32' alignment-in-bits='32' id='9cac1fee'/>
+ <type-decl name='void' id='48b5725f'/>
+ <typedef-decl name='lzbe_flags_t' type-id='2b77720b' id='a1936f04'/>
+ <enum-decl name='lzbe_flags' id='2b77720b'>
+ <underlying-type type-id='9cac1fee'/>
<enumerator name='lzbe_add' value='0'/>
<enumerator name='lzbe_replace' value='1'/>
</enum-decl>
- <typedef-decl name='lzbe_flags_t' type-id='type-id-8' id='type-id-9'/>
- <function-decl name='lzbe_set_boot_device' mangled-name='lzbe_set_boot_device' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_set_boot_device'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-9' name='flag'/>
- <parameter type-id='type-id-4' name='device'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <type-decl name='void' id='type-id-10'/>
- <function-decl name='libzfs_init' mangled-name='libzfs_init' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='zpool_open' mangled-name='zpool_open' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='zpool_get_bootenv' mangled-name='zpool_get_bootenv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='zpool_close' mangled-name='zpool_close' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='libzfs_fini' mangled-name='libzfs_fini' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_lookup_string' mangled-name='nvlist_lookup_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_free' mangled-name='nvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='strdup' mangled-name='strdup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='__stack_chk_fail' mangled-name='__stack_chk_fail' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='__asprintf_chk' mangled-name='__asprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='__fprintf_chk' mangled-name='__fprintf_chk' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='fnvlist_alloc' mangled-name='fnvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='fnvlist_add_uint64' mangled-name='fnvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_exists' mangled-name='nvlist_exists' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='zpool_set_bootenv' mangled-name='zpool_set_bootenv' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='fnvlist_free' mangled-name='fnvlist_free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='fnvlist_add_string' mangled-name='fnvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='free' mangled-name='free' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_lookup_uint64' mangled-name='nvlist_lookup_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='libzfs_error_description' mangled-name='libzfs_error_description' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
+ <pointer-type-def type-id='a84c031d' size-in-bits='64' id='26a90f95'/>
+ <pointer-type-def type-id='26a90f95' size-in-bits='64' id='9b23c9ad'/>
+ <qualified-type-def type-id='a84c031d' const='yes' id='9b45d938'/>
+ <pointer-type-def type-id='9b45d938' size-in-bits='64' id='80f4b756'/>
+ <function-decl name='lzbe_get_boot_device' mangled-name='lzbe_get_boot_device' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_get_boot_device'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='9b23c9ad' name='device'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <function-decl name='fnvlist_remove' mangled-name='fnvlist_remove' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
+ <function-decl name='lzbe_set_boot_device' mangled-name='lzbe_set_boot_device' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_set_boot_device'>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='a1936f04' name='flag'/>
+ <parameter type-id='80f4b756' name='device'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='lzbe_pair.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfsbootenv' language='LANG_C99'>
- <pointer-type-def type-id='type-id-10' size-in-bits='64' id='type-id-11'/>
+ <abi-instr version='1.0' address-size='64' path='lzbe_pair.c' language='LANG_C99'>
+ <type-decl name='unsigned long int' size-in-bits='64' id='7359adad'/>
+ <typedef-decl name='size_t' type-id='7359adad' id='b59d7dce'/>
+ <pointer-type-def type-id='48b5725f' size-in-bits='64' id='eaa32e2f'/>
+ <pointer-type-def type-id='eaa32e2f' size-in-bits='64' id='63e171df'/>
<function-decl name='lzbe_remove_pair' mangled-name='lzbe_remove_pair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_remove_pair'>
- <parameter type-id='type-id-11' name='ptr'/>
- <parameter type-id='type-id-4' name='key'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='eaa32e2f' name='ptr'/>
+ <parameter type-id='80f4b756' name='key'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <type-decl name='unsigned long int' size-in-bits='64' id='type-id-12'/>
- <typedef-decl name='size_t' type-id='type-id-12' id='type-id-13'/>
<function-decl name='lzbe_add_pair' mangled-name='lzbe_add_pair' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_add_pair'>
- <parameter type-id='type-id-11' name='ptr'/>
- <parameter type-id='type-id-4' name='key'/>
- <parameter type-id='type-id-4' name='type'/>
- <parameter type-id='type-id-11' name='value'/>
- <parameter type-id='type-id-13' name='size'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='eaa32e2f' name='ptr'/>
+ <parameter type-id='80f4b756' name='key'/>
+ <parameter type-id='80f4b756' name='type'/>
+ <parameter type-id='eaa32e2f' name='value'/>
+ <parameter type-id='b59d7dce' name='size'/>
+ <return type-id='95e97e5e'/>
</function-decl>
<function-decl name='lzbe_nvlist_free' mangled-name='lzbe_nvlist_free' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_free'>
- <parameter type-id='type-id-11' name='ptr'/>
- <return type-id='type-id-10'/>
+ <parameter type-id='eaa32e2f' name='ptr'/>
+ <return type-id='48b5725f'/>
</function-decl>
<function-decl name='lzbe_nvlist_set' mangled-name='lzbe_nvlist_set' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_set'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-4' name='key'/>
- <parameter type-id='type-id-11' name='ptr'/>
- <return type-id='type-id-1'/>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='80f4b756' name='key'/>
+ <parameter type-id='eaa32e2f' name='ptr'/>
+ <return type-id='95e97e5e'/>
</function-decl>
- <pointer-type-def type-id='type-id-11' size-in-bits='64' id='type-id-14'/>
<function-decl name='lzbe_nvlist_get' mangled-name='lzbe_nvlist_get' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_nvlist_get'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-4' name='key'/>
- <parameter type-id='type-id-14' name='ptr'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='nvlist_remove_all' mangled-name='nvlist_remove_all' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='strcmp' mangled-name='strcmp' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint8_array' mangled-name='nvlist_add_uint8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_byte' mangled-name='nvlist_add_byte' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int16' mangled-name='nvlist_add_int16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint16' mangled-name='nvlist_add_uint16' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int32' mangled-name='nvlist_add_int32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint32' mangled-name='nvlist_add_uint32' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int64' mangled-name='nvlist_add_int64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint64' mangled-name='nvlist_add_uint64' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_string' mangled-name='nvlist_add_string' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_byte_array' mangled-name='nvlist_add_byte_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int16_array' mangled-name='nvlist_add_int16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint16_array' mangled-name='nvlist_add_uint16_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int32_array' mangled-name='nvlist_add_int32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint32_array' mangled-name='nvlist_add_uint32_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int64_array' mangled-name='nvlist_add_int64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint64_array' mangled-name='nvlist_add_uint64_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_string_array' mangled-name='nvlist_add_string_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist' mangled-name='nvlist_add_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_nvlist_array' mangled-name='nvlist_add_nvlist_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_boolean_value' mangled-name='nvlist_add_boolean_value' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int8' mangled-name='nvlist_add_int8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_uint8' mangled-name='nvlist_add_uint8' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_boolean_array' mangled-name='nvlist_add_boolean_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_add_int8_array' mangled-name='nvlist_add_int8_array' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_lookup_nvlist' mangled-name='nvlist_lookup_nvlist' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_dup' mangled-name='nvlist_dup' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_alloc' mangled-name='nvlist_alloc' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='80f4b756' name='key'/>
+ <parameter type-id='63e171df' name='ptr'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
- <abi-instr version='1.0' address-size='64' path='lzbe_util.c' comp-dir-path='/home/runner/work/zfs/zfs/lib/libzfsbootenv' language='LANG_C99'>
- <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='type-id-15'>
+ <abi-instr version='1.0' address-size='64' path='lzbe_util.c' language='LANG_C99'>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='8' id='89feb1ec'>
+ <subrange length='1' type-id='7359adad' id='52f813b4'/>
+ </array-type-def>
+ <array-type-def dimensions='1' type-id='a84c031d' size-in-bits='160' id='664ac0b7'>
+ <subrange length='20' type-id='7359adad' id='fdca39cf'/>
+ </array-type-def>
+ <type-decl name='long int' size-in-bits='64' id='bd54fe1a'/>
+ <type-decl name='signed char' size-in-bits='8' id='28577a57'/>
+ <type-decl name='unsigned short int' size-in-bits='16' id='8efea9e5'/>
+ <typedef-decl name='FILE' type-id='ec1ed955' id='aa12d1ba'/>
+ <class-decl name='_IO_FILE' size-in-bits='1728' is-struct='yes' visibility='default' id='ec1ed955'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_flags' type-id='type-id-1' visibility='default'/>
+ <var-decl name='_flags' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_IO_read_ptr' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_read_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_IO_read_end' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_read_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='192'>
- <var-decl name='_IO_read_base' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_read_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='256'>
- <var-decl name='_IO_write_base' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_write_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='320'>
- <var-decl name='_IO_write_ptr' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_write_ptr' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='384'>
- <var-decl name='_IO_write_end' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_write_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='448'>
- <var-decl name='_IO_buf_base' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_buf_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='512'>
- <var-decl name='_IO_buf_end' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_buf_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='576'>
- <var-decl name='_IO_save_base' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_save_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='640'>
- <var-decl name='_IO_backup_base' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_backup_base' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='704'>
- <var-decl name='_IO_save_end' type-id='type-id-5' visibility='default'/>
+ <var-decl name='_IO_save_end' type-id='26a90f95' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='768'>
- <var-decl name='_markers' type-id='type-id-16' visibility='default'/>
+ <var-decl name='_markers' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='832'>
- <var-decl name='_chain' type-id='type-id-17' visibility='default'/>
+ <var-decl name='_chain' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='896'>
- <var-decl name='_fileno' type-id='type-id-1' visibility='default'/>
+ <var-decl name='_fileno' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='928'>
- <var-decl name='_flags2' type-id='type-id-1' visibility='default'/>
+ <var-decl name='_flags2' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='960'>
- <var-decl name='_old_offset' type-id='type-id-18' visibility='default'/>
+ <var-decl name='_old_offset' type-id='79989e9c' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1024'>
- <var-decl name='_cur_column' type-id='type-id-19' visibility='default'/>
+ <var-decl name='_cur_column' type-id='8efea9e5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1040'>
- <var-decl name='_vtable_offset' type-id='type-id-20' visibility='default'/>
+ <var-decl name='_vtable_offset' type-id='28577a57' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1048'>
- <var-decl name='_shortbuf' type-id='type-id-21' visibility='default'/>
+ <var-decl name='_shortbuf' type-id='89feb1ec' visibility='default'/>
+ </data-member>
+ <data-member access='public' layout-offset-in-bits='1088'>
+ <var-decl name='_lock' type-id='cecf4ea7' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1152'>
- <var-decl name='_offset' type-id='type-id-22' visibility='default'/>
+ <var-decl name='_offset' type-id='724e4de6' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1216'>
- <var-decl name='__pad1' type-id='type-id-11' visibility='default'/>
+ <var-decl name='__pad1' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1280'>
- <var-decl name='__pad2' type-id='type-id-11' visibility='default'/>
+ <var-decl name='__pad2' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1344'>
- <var-decl name='__pad3' type-id='type-id-11' visibility='default'/>
+ <var-decl name='__pad3' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1408'>
- <var-decl name='__pad4' type-id='type-id-11' visibility='default'/>
+ <var-decl name='__pad4' type-id='eaa32e2f' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1472'>
- <var-decl name='__pad5' type-id='type-id-13' visibility='default'/>
+ <var-decl name='__pad5' type-id='b59d7dce' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1536'>
- <var-decl name='_mode' type-id='type-id-1' visibility='default'/>
+ <var-decl name='_mode' type-id='95e97e5e' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='1568'>
- <var-decl name='_unused2' type-id='type-id-23' visibility='default'/>
+ <var-decl name='_unused2' type-id='664ac0b7' visibility='default'/>
</data-member>
</class-decl>
- <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='type-id-24'>
+ <class-decl name='_IO_marker' size-in-bits='192' is-struct='yes' visibility='default' id='010ae0b9'>
<data-member access='public' layout-offset-in-bits='0'>
- <var-decl name='_next' type-id='type-id-16' visibility='default'/>
+ <var-decl name='_next' type-id='e4c6fa61' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='64'>
- <var-decl name='_sbuf' type-id='type-id-17' visibility='default'/>
+ <var-decl name='_sbuf' type-id='dca988a5' visibility='default'/>
</data-member>
<data-member access='public' layout-offset-in-bits='128'>
- <var-decl name='_pos' type-id='type-id-1' visibility='default'/>
+ <var-decl name='_pos' type-id='95e97e5e' visibility='default'/>
</data-member>
</class-decl>
- <pointer-type-def type-id='type-id-24' size-in-bits='64' id='type-id-16'/>
- <pointer-type-def type-id='type-id-15' size-in-bits='64' id='type-id-17'/>
- <type-decl name='long int' size-in-bits='64' id='type-id-25'/>
- <typedef-decl name='__off_t' type-id='type-id-25' id='type-id-18'/>
- <type-decl name='unsigned short int' size-in-bits='16' id='type-id-19'/>
- <type-decl name='signed char' size-in-bits='8' id='type-id-20'/>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='8' id='type-id-21'>
- <subrange length='1' type-id='type-id-12' id='type-id-26'/>
-
- </array-type-def>
- <typedef-decl name='__off64_t' type-id='type-id-25' id='type-id-22'/>
-
- <array-type-def dimensions='1' type-id='type-id-2' size-in-bits='160' id='type-id-23'>
- <subrange length='20' type-id='type-id-12' id='type-id-27'/>
-
- </array-type-def>
- <typedef-decl name='FILE' type-id='type-id-15' id='type-id-28'/>
- <pointer-type-def type-id='type-id-28' size-in-bits='64' id='type-id-29'/>
+ <typedef-decl name='__off_t' type-id='bd54fe1a' id='79989e9c'/>
+ <typedef-decl name='_IO_lock_t' type-id='48b5725f' id='bb4788fa'/>
+ <typedef-decl name='__off64_t' type-id='bd54fe1a' id='724e4de6'/>
+ <pointer-type-def type-id='aa12d1ba' size-in-bits='64' id='822cd80b'/>
+ <pointer-type-def type-id='ec1ed955' size-in-bits='64' id='dca988a5'/>
+ <pointer-type-def type-id='bb4788fa' size-in-bits='64' id='cecf4ea7'/>
+ <pointer-type-def type-id='010ae0b9' size-in-bits='64' id='e4c6fa61'/>
<function-decl name='lzbe_bootenv_print' mangled-name='lzbe_bootenv_print' visibility='default' binding='global' size-in-bits='64' elf-symbol-id='lzbe_bootenv_print'>
- <parameter type-id='type-id-4' name='pool'/>
- <parameter type-id='type-id-4' name='nvlist'/>
- <parameter type-id='type-id-29' name='of'/>
- <return type-id='type-id-1'/>
- </function-decl>
- <function-decl name='lzbe_nvlist_get' mangled-name='lzbe_nvlist_get' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
- </function-decl>
- <function-decl name='nvlist_print' mangled-name='nvlist_print' visibility='default' binding='global' size-in-bits='64'>
- <return type-id='type-id-10'/>
+ <parameter type-id='80f4b756' name='pool'/>
+ <parameter type-id='80f4b756' name='nvlist'/>
+ <parameter type-id='822cd80b' name='of'/>
+ <return type-id='95e97e5e'/>
</function-decl>
</abi-instr>
</abi-corpus>
diff --git a/sys/contrib/openzfs/lib/libzfsbootenv/lzbe_device.c b/sys/contrib/openzfs/lib/libzfsbootenv/lzbe_device.c
index 2d8833b4fff2..2d9c7b749ef2 100644
--- a/sys/contrib/openzfs/lib/libzfsbootenv/lzbe_device.c
+++ b/sys/contrib/openzfs/lib/libzfsbootenv/lzbe_device.c
@@ -1,163 +1,163 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright 2020 Toomas Soome <tsoome@me.com>
*/
#include <sys/types.h>
#include <string.h>
#include <libzfs.h>
#include <libzfsbootenv.h>
#include <sys/zfs_bootenv.h>
#include <sys/vdev_impl.h>
/*
* Store the device name in the zpool label bootenv area.
* This call will set the bootenv version to VB_NVLIST; if the bootenv
* currently contains a different version, the old data will be replaced.
*/
int
lzbe_set_boot_device(const char *pool, lzbe_flags_t flag, const char *device)
{
libzfs_handle_t *hdl;
zpool_handle_t *zphdl;
nvlist_t *nv;
char *descriptor;
uint64_t version;
int rv = -1;
if (pool == NULL || *pool == '\0')
return (rv);
if ((hdl = libzfs_init()) == NULL)
return (rv);
zphdl = zpool_open(hdl, pool);
if (zphdl == NULL) {
libzfs_fini(hdl);
return (rv);
}
switch (flag) {
case lzbe_add:
rv = zpool_get_bootenv(zphdl, &nv);
if (rv == 0) {
/*
* We got the nvlist; check its version.
* If the version is missing or is not VB_NVLIST,
* create a new list.
*/
rv = nvlist_lookup_uint64(nv, BOOTENV_VERSION,
&version);
if (rv == 0 && version == VB_NVLIST)
break;
/* Drop this nvlist */
fnvlist_free(nv);
}
- /* FALLTHROUGH */
+ fallthrough;
case lzbe_replace:
nv = fnvlist_alloc();
break;
default:
return (rv);
}
/* version is mandatory */
fnvlist_add_uint64(nv, BOOTENV_VERSION, VB_NVLIST);
/*
* If the device name is empty, remove the boot device configuration.
*/
if ((device == NULL || *device == '\0')) {
if (nvlist_exists(nv, OS_BOOTONCE))
fnvlist_remove(nv, OS_BOOTONCE);
} else {
/*
* Use the device name directly if it already starts with the
* prefix "zfs:". Otherwise, add the prefix and suffix.
*/
if (strncmp(device, "zfs:", 4) == 0) {
fnvlist_add_string(nv, OS_BOOTONCE, device);
} else {
if (asprintf(&descriptor, "zfs:%s:", device) > 0) {
fnvlist_add_string(nv, OS_BOOTONCE, descriptor);
free(descriptor);
} else
rv = ENOMEM;
}
}
rv = zpool_set_bootenv(zphdl, nv);
if (rv != 0)
fprintf(stderr, "%s\n", libzfs_error_description(hdl));
fnvlist_free(nv);
zpool_close(zphdl);
libzfs_fini(hdl);
return (rv);
}
/*
* Return boot device name from bootenv, if set.
*/
int
lzbe_get_boot_device(const char *pool, char **device)
{
libzfs_handle_t *hdl;
zpool_handle_t *zphdl;
nvlist_t *nv;
char *val;
int rv = -1;
if (pool == NULL || *pool == '\0' || device == NULL)
return (rv);
if ((hdl = libzfs_init()) == NULL)
return (rv);
zphdl = zpool_open(hdl, pool);
if (zphdl == NULL) {
libzfs_fini(hdl);
return (rv);
}
rv = zpool_get_bootenv(zphdl, &nv);
if (rv == 0) {
rv = nvlist_lookup_string(nv, OS_BOOTONCE, &val);
if (rv == 0) {
/*
* The zfs device descriptor has the form "zfs:dataset:";
* we only need the dataset name.
*/
if (strncmp(val, "zfs:", 4) == 0) {
val += 4;
val = strdup(val);
if (val != NULL) {
size_t len = strlen(val);
if (val[len - 1] == ':')
val[len - 1] = '\0';
*device = val;
} else {
rv = ENOMEM;
}
} else {
rv = EINVAL;
}
}
nvlist_free(nv);
}
zpool_close(zphdl);
libzfs_fini(hdl);
return (rv);
}
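For reference, a minimal usage sketch of the two functions above (not part of the diff): the pool name "rpool" and dataset "rpool/ROOT/next" are hypothetical, and the program is assumed to link against libzfsbootenv and to run with enough privilege to open the pool.
#include <stdio.h>
#include <stdlib.h>
#include <libzfsbootenv.h>
int
main(void)
{
	char *device = NULL;
	/* Stores "zfs:rpool/ROOT/next:" under OS_BOOTONCE in the pool's bootenv nvlist. */
	if (lzbe_set_boot_device("rpool", lzbe_add, "rpool/ROOT/next") != 0) {
		fprintf(stderr, "failed to set bootonce device\n");
		return (1);
	}
	/* Reads it back; lzbe_get_boot_device() strips the "zfs:" prefix and trailing ':'. */
	if (lzbe_get_boot_device("rpool", &device) == 0) {
		printf("bootonce: %s\n", device);
		free(device);
	}
	return (0);
}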
diff --git a/sys/contrib/openzfs/lib/libzutil/zutil_import.c b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
index 95fd0ec0af85..04b9f26abb92 100644
--- a/sys/contrib/openzfs/lib/libzutil/zutil_import.c
+++ b/sys/contrib/openzfs/lib/libzutil/zutil_import.c
@@ -1,1860 +1,1861 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
* Copyright 2015 RackTop Systems.
* Copyright (c) 2016, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* Pool import support functions.
*
* Used by zpool, ztest, zdb, and zhack to locate importable configs. Since
* these commands are expected to run in the global zone, we can assume
* that the devices are all readable when called.
*
* To import a pool, we rely on reading the configuration information from the
* ZFS label of each device. If we successfully read the label, then we
* organize the configuration information in the following hierarchy:
*
* pool guid -> toplevel vdev guid -> label txg
*
* Duplicate entries matching this same tuple will be discarded. Once we have
* examined every device, we pick the best label txg config for each toplevel
* vdev. We then arrange these toplevel vdevs into a complete pool config, and
* update any paths that have changed. Finally, we attempt to import the pool
* using our derived config, and record the results.
*/
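/*
 * Illustrative sketch (not part of the original source): the import scan
 * described above effectively builds a small in-memory tree,
 *
 *   pool_entry (pe_guid)
 *     -> vdev_entry (ve_guid, one per toplevel vdev)
 *       -> config_entry (ce_txg, ce_config nvlist, one per label txg)
 *
 * together with a flat name_entry list that maps vdev guids to the device
 * paths they were found under; per the comment above, the best label txg
 * config is kept for each toplevel vdev when the pool config is assembled.
 */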
#include <aio.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <libintl.h>
#include <libgen.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/dktp/fdisk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <thread_pool.h>
#include <libzutil.h>
#include <libnvpair.h>
#include "zutil_import.h"
static __attribute__((format(printf, 2, 3))) void
zutil_error_aux(libpc_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->lpc_desc, sizeof (hdl->lpc_desc), fmt, ap);
hdl->lpc_desc_active = B_TRUE;
va_end(ap);
}
static void
zutil_verror(libpc_handle_t *hdl, const char *error, const char *fmt,
va_list ap)
{
char action[1024];
(void) vsnprintf(action, sizeof (action), fmt, ap);
if (hdl->lpc_desc_active)
hdl->lpc_desc_active = B_FALSE;
else
hdl->lpc_desc[0] = '\0';
if (hdl->lpc_printerr) {
if (hdl->lpc_desc[0] != '\0')
error = hdl->lpc_desc;
(void) fprintf(stderr, "%s: %s\n", action, error);
}
}
static __attribute__((format(printf, 3, 4))) int
zutil_error_fmt(libpc_handle_t *hdl, const char *error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zutil_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zutil_error(libpc_handle_t *hdl, const char *error, const char *msg)
{
return (zutil_error_fmt(hdl, error, "%s", msg));
}
static int
zutil_no_memory(libpc_handle_t *hdl)
{
zutil_error(hdl, EZFS_NOMEM, "internal error");
exit(1);
}
void *
zutil_alloc(libpc_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) zutil_no_memory(hdl);
return (data);
}
char *
zutil_strdup(libpc_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
static char *
zutil_strndup(libpc_handle_t *hdl, const char *str, size_t n)
{
char *ret;
if ((ret = strndup(str, n)) == NULL)
(void) zutil_no_memory(hdl);
return (ret);
}
/*
* Intermediate structures used to gather configuration information.
*/
typedef struct config_entry {
uint64_t ce_txg;
nvlist_t *ce_config;
struct config_entry *ce_next;
} config_entry_t;
typedef struct vdev_entry {
uint64_t ve_guid;
config_entry_t *ve_configs;
struct vdev_entry *ve_next;
} vdev_entry_t;
typedef struct pool_entry {
uint64_t pe_guid;
vdev_entry_t *pe_vdevs;
struct pool_entry *pe_next;
} pool_entry_t;
typedef struct name_entry {
char *ne_name;
uint64_t ne_guid;
uint64_t ne_order;
uint64_t ne_num_labels;
struct name_entry *ne_next;
} name_entry_t;
typedef struct pool_list {
pool_entry_t *pools;
name_entry_t *names;
} pool_list_t;
/*
* Go through and fix up any path and/or devid information for the given vdev
* configuration.
*/
static int
fix_paths(libpc_handle_t *hdl, nvlist_t *nv, name_entry_t *names)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
name_entry_t *ne, *best;
char *path;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (fix_paths(hdl, child[c], names) != 0)
return (-1);
return (0);
}
/*
* This is a leaf (file or disk) vdev. In either case, go through
* the name list and see if we find a matching guid. If so, replace
* the path and see if we can calculate a new devid.
*
* There may be multiple names associated with a particular guid, in
* which case we have overlapping partitions or multiple paths to the
* same disk. In this case we prefer to use the path name which
* matches the ZPOOL_CONFIG_PATH. If no matching entry is found we
* use the lowest order device which corresponds to the first match
* while traversing the ZPOOL_IMPORT_PATH search path.
*/
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
path = NULL;
best = NULL;
for (ne = names; ne != NULL; ne = ne->ne_next) {
if (ne->ne_guid == guid) {
if (path == NULL) {
best = ne;
break;
}
if ((strlen(path) == strlen(ne->ne_name)) &&
strncmp(path, ne->ne_name, strlen(path)) == 0) {
best = ne;
break;
}
if (best == NULL) {
best = ne;
continue;
}
/* Prefer paths with more vdev labels. */
if (ne->ne_num_labels > best->ne_num_labels) {
best = ne;
continue;
}
/* Prefer paths earlier in the search order. */
if (ne->ne_num_labels == best->ne_num_labels &&
ne->ne_order < best->ne_order) {
best = ne;
continue;
}
}
}
if (best == NULL)
return (0);
if (nvlist_add_string(nv, ZPOOL_CONFIG_PATH, best->ne_name) != 0)
return (-1);
update_vdev_config_dev_strs(nv);
return (0);
}
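/*
 * To illustrate the preference order above (hypothetical entries): for a
 * guid with no exact ZPOOL_CONFIG_PATH match, an entry such as
 * ("/dev/disk/by-id/ata-DISK-part1", 4 labels, order 0) is chosen over
 * ("/dev/sdb1", 4 labels, order 2) because of its lower search order, and
 * either of them is chosen over an entry with only 2 readable labels.
 */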
/*
* Add the given configuration to the list of known devices.
*/
static int
add_config(libpc_handle_t *hdl, pool_list_t *pl, const char *path,
int order, int num_labels, nvlist_t *config)
{
uint64_t pool_guid, vdev_guid, top_guid, txg, state;
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
name_entry_t *ne;
/*
* If this is a hot spare not currently in use or level 2 cache
* device, add it to the list of names to translate, but don't do
* anything else.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&state) == 0 &&
(state == POOL_STATE_SPARE || state == POOL_STATE_L2CACHE) &&
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID, &vdev_guid) == 0) {
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
/*
* If we have a valid config but cannot read any of these fields, then
* it means we have a half-initialized label. In vdev_label_init()
* we write a label with txg == 0 so that we can identify the device
* in case the user refers to the same disk later on. If we fail to
* create the pool, we'll be left with a label in this state
* which should not be considered part of a valid pool.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_GUID,
&vdev_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0 ||
nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0) {
return (0);
}
/*
* First, see if we know about this pool. If not, then add it to the
* list of known pools.
*/
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
if (pe->pe_guid == pool_guid)
break;
}
if (pe == NULL) {
if ((pe = zutil_alloc(hdl, sizeof (pool_entry_t))) == NULL) {
return (-1);
}
pe->pe_guid = pool_guid;
pe->pe_next = pl->pools;
pl->pools = pe;
}
/*
* Second, see if we know about this toplevel vdev. Add it if it's
* missing.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
if (ve->ve_guid == top_guid)
break;
}
if (ve == NULL) {
if ((ve = zutil_alloc(hdl, sizeof (vdev_entry_t))) == NULL) {
return (-1);
}
ve->ve_guid = top_guid;
ve->ve_next = pe->pe_vdevs;
pe->pe_vdevs = ve;
}
/*
* Third, see if we have a config with a matching transaction group. If
* so, then we do nothing. Otherwise, add it to the list of known
* configs.
*/
for (ce = ve->ve_configs; ce != NULL; ce = ce->ce_next) {
if (ce->ce_txg == txg)
break;
}
if (ce == NULL) {
if ((ce = zutil_alloc(hdl, sizeof (config_entry_t))) == NULL) {
return (-1);
}
ce->ce_txg = txg;
ce->ce_config = fnvlist_dup(config);
ce->ce_next = ve->ve_configs;
ve->ve_configs = ce;
}
/*
* At this point we've successfully added our config to the list of
* known configs. The last thing to do is add the vdev guid -> path
* mappings so that we can fix up the configuration as necessary before
* doing the import.
*/
if ((ne = zutil_alloc(hdl, sizeof (name_entry_t))) == NULL)
return (-1);
if ((ne->ne_name = zutil_strdup(hdl, path)) == NULL) {
free(ne);
return (-1);
}
ne->ne_guid = vdev_guid;
ne->ne_order = order;
ne->ne_num_labels = num_labels;
ne->ne_next = pl->names;
pl->names = ne;
return (0);
}
static int
zutil_pool_active(libpc_handle_t *hdl, const char *name, uint64_t guid,
boolean_t *isactive)
{
ASSERT(hdl->lpc_ops->pco_pool_active != NULL);
int error = hdl->lpc_ops->pco_pool_active(hdl->lpc_lib_handle, name,
guid, isactive);
return (error);
}
static nvlist_t *
zutil_refresh_config(libpc_handle_t *hdl, nvlist_t *tryconfig)
{
ASSERT(hdl->lpc_ops->pco_refresh_config != NULL);
return (hdl->lpc_ops->pco_refresh_config(hdl->lpc_lib_handle,
tryconfig));
}
/*
* Determine if the vdev id is a hole in the namespace.
*/
static boolean_t
vdev_is_hole(uint64_t *hole_array, uint_t holes, uint_t id)
{
int c;
for (c = 0; c < holes; c++) {
/* Top-level is a hole */
if (hole_array[c] == id)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Convert our list of pools into the definitive set of configurations. We
* start by picking the best config for each toplevel vdev. Once that's done,
* we assemble the toplevel vdevs into a full config for the pool. We make a
* pass to fix up any incorrect paths, and then add it to the main list to
* return to the user.
*/
static nvlist_t *
get_configs(libpc_handle_t *hdl, pool_list_t *pl, boolean_t active_ok,
nvlist_t *policy)
{
pool_entry_t *pe;
vdev_entry_t *ve;
config_entry_t *ce;
nvlist_t *ret = NULL, *config = NULL, *tmp = NULL, *nvtop, *nvroot;
nvlist_t **spares, **l2cache;
uint_t i, nspares, nl2cache;
boolean_t config_seen;
uint64_t best_txg;
char *name, *hostname = NULL;
uint64_t guid;
uint_t children = 0;
nvlist_t **child = NULL;
uint_t holes;
uint64_t *hole_array, max_id;
uint_t c;
boolean_t isactive;
uint64_t hostid;
nvlist_t *nvl;
boolean_t valid_top_config = B_FALSE;
if (nvlist_alloc(&ret, 0, 0) != 0)
goto nomem;
for (pe = pl->pools; pe != NULL; pe = pe->pe_next) {
uint64_t id, max_txg = 0;
if (nvlist_alloc(&config, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
config_seen = B_FALSE;
/*
* Iterate over all toplevel vdevs. Grab the pool configuration
* from the first one we find, and then go through the rest and
* add them as necessary to the 'vdevs' member of the config.
*/
for (ve = pe->pe_vdevs; ve != NULL; ve = ve->ve_next) {
/*
* Determine the best configuration for this vdev by
* selecting the config with the latest transaction
* group.
*/
best_txg = 0;
for (ce = ve->ve_configs; ce != NULL;
ce = ce->ce_next) {
if (ce->ce_txg > best_txg) {
tmp = ce->ce_config;
best_txg = ce->ce_txg;
}
}
/*
* We rely on the fact that the max txg for the
* pool will contain the most up-to-date information
* about the valid top-levels in the vdev namespace.
*/
if (best_txg > max_txg) {
(void) nvlist_remove(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
DATA_TYPE_UINT64);
(void) nvlist_remove(config,
ZPOOL_CONFIG_HOLE_ARRAY,
DATA_TYPE_UINT64_ARRAY);
max_txg = best_txg;
hole_array = NULL;
holes = 0;
max_id = 0;
valid_top_config = B_FALSE;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VDEV_CHILDREN, &max_id) == 0) {
verify(nvlist_add_uint64(config,
ZPOOL_CONFIG_VDEV_CHILDREN,
max_id) == 0);
valid_top_config = B_TRUE;
}
if (nvlist_lookup_uint64_array(tmp,
ZPOOL_CONFIG_HOLE_ARRAY, &hole_array,
&holes) == 0) {
verify(nvlist_add_uint64_array(config,
ZPOOL_CONFIG_HOLE_ARRAY,
hole_array, holes) == 0);
}
}
if (!config_seen) {
/*
* Copy the relevant pieces of data to the pool
* configuration:
*
* version
* pool guid
* name
* comment (if available)
* compatibility features (if available)
* pool state
* hostid (if available)
* hostname (if available)
*/
uint64_t state, version;
char *comment = NULL;
char *compatibility = NULL;
version = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_VERSION);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_VERSION, version);
guid = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_GUID);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_GUID, guid);
name = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_POOL_NAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_POOL_NAME, name);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMMENT, &comment) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMMENT, comment);
if (nvlist_lookup_string(tmp,
ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
fnvlist_add_string(config,
ZPOOL_CONFIG_COMPATIBILITY,
compatibility);
state = fnvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_POOL_STATE);
fnvlist_add_uint64(config,
ZPOOL_CONFIG_POOL_STATE, state);
hostid = 0;
if (nvlist_lookup_uint64(tmp,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
fnvlist_add_uint64(config,
ZPOOL_CONFIG_HOSTID, hostid);
hostname = fnvlist_lookup_string(tmp,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(config,
ZPOOL_CONFIG_HOSTNAME, hostname);
}
config_seen = B_TRUE;
}
/*
* Add this top-level vdev to the child array.
*/
verify(nvlist_lookup_nvlist(tmp,
ZPOOL_CONFIG_VDEV_TREE, &nvtop) == 0);
verify(nvlist_lookup_uint64(nvtop, ZPOOL_CONFIG_ID,
&id) == 0);
if (id >= children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (id + 1) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = id + 1;
}
if (nvlist_dup(nvtop, &child[id], 0) != 0)
goto nomem;
}
/*
* If we have information about all the top-levels then
* clean up the nvlist which we've constructed. This
* means removing any extraneous devices that are
* beyond the valid range or adding devices to the end
* of our array which appear to be missing.
*/
if (valid_top_config) {
if (max_id < children) {
for (c = max_id; c < children; c++)
nvlist_free(child[c]);
children = max_id;
} else if (max_id > children) {
nvlist_t **newchild;
newchild = zutil_alloc(hdl, (max_id) *
sizeof (nvlist_t *));
if (newchild == NULL)
goto nomem;
for (c = 0; c < children; c++)
newchild[c] = child[c];
free(child);
child = newchild;
children = max_id;
}
}
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
/*
* The vdev namespace may contain holes as a result of
* device removal. We must add them back into the vdev
* tree before we process any missing devices.
*/
if (holes > 0) {
ASSERT(valid_top_config);
for (c = 0; c < children; c++) {
nvlist_t *holey;
if (child[c] != NULL ||
!vdev_is_hole(hole_array, holes, c))
continue;
if (nvlist_alloc(&holey, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
/*
* Holes in the namespace are treated as
* "hole" top-level vdevs and have a
* special flag set on them.
*/
if (nvlist_add_string(holey,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(holey,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(holey);
goto nomem;
}
child[c] = holey;
}
}
/*
* Look for any missing top-level vdevs. If any are found,
* create a faked up 'missing' vdev as a placeholder. We cannot
* simply compress the child array, because the kernel performs
* certain checks to make sure the vdev IDs match their location
* in the configuration.
*/
for (c = 0; c < children; c++) {
if (child[c] == NULL) {
nvlist_t *missing;
if (nvlist_alloc(&missing, NV_UNIQUE_NAME,
0) != 0)
goto nomem;
if (nvlist_add_string(missing,
ZPOOL_CONFIG_TYPE,
VDEV_TYPE_MISSING) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_ID, c) != 0 ||
nvlist_add_uint64(missing,
ZPOOL_CONFIG_GUID, 0ULL) != 0) {
nvlist_free(missing);
goto nomem;
}
child[c] = missing;
}
}
/*
* Put all of this pool's top-level vdevs into a root vdev.
*/
if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0)
goto nomem;
if (nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_ID, 0ULL) != 0 ||
nvlist_add_uint64(nvroot, ZPOOL_CONFIG_GUID, guid) != 0 ||
nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
child, children) != 0) {
nvlist_free(nvroot);
goto nomem;
}
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
children = 0;
child = NULL;
/*
* Go through and fix up any paths and/or devids based on our
* known list of vdev GUID -> path mappings.
*/
if (fix_paths(hdl, nvroot, pl->names) != 0) {
nvlist_free(nvroot);
goto nomem;
}
/*
* Add the root vdev to this pool's configuration.
*/
if (nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
nvroot) != 0) {
nvlist_free(nvroot);
goto nomem;
}
nvlist_free(nvroot);
/*
* zdb uses this path to report on active pools that were
* imported or created using -R.
*/
if (active_ok)
goto add_pool;
/*
* Determine if this pool is currently active, in which case we
* can't actually import it.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
if (zutil_pool_active(hdl, name, guid, &isactive) != 0)
goto error;
if (isactive) {
nvlist_free(config);
config = NULL;
continue;
}
if (policy != NULL) {
if (nvlist_add_nvlist(config, ZPOOL_LOAD_POLICY,
policy) != 0)
goto nomem;
}
if ((nvl = zutil_refresh_config(hdl, config)) == NULL) {
nvlist_free(config);
config = NULL;
continue;
}
nvlist_free(config);
config = nvl;
/*
* Go through and update the paths for spares, now that we have
* them.
*/
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
for (i = 0; i < nspares; i++) {
if (fix_paths(hdl, spares[i], pl->names) != 0)
goto nomem;
}
}
/*
* Update the paths for l2cache devices.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
for (i = 0; i < nl2cache; i++) {
if (fix_paths(hdl, l2cache[i], pl->names) != 0)
goto nomem;
}
}
/*
* Restore the original information read from the actual label.
*/
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTID,
DATA_TYPE_UINT64);
(void) nvlist_remove(config, ZPOOL_CONFIG_HOSTNAME,
DATA_TYPE_STRING);
if (hostid != 0) {
verify(nvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID,
hostid) == 0);
verify(nvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME,
hostname) == 0);
}
add_pool:
/*
* Add this pool to the list of configs.
*/
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
if (nvlist_add_nvlist(ret, name, config) != 0)
goto nomem;
nvlist_free(config);
config = NULL;
}
return (ret);
nomem:
(void) zutil_no_memory(hdl);
error:
nvlist_free(config);
nvlist_free(ret);
for (c = 0; c < children; c++)
nvlist_free(child[c]);
free(child);
return (NULL);
}
/*
* Return the offset of the given label.
*/
static uint64_t
label_offset(uint64_t size, int l)
{
ASSERT(P2PHASE_TYPED(size, sizeof (vdev_label_t), uint64_t) == 0);
return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : size - VDEV_LABELS * sizeof (vdev_label_t)));
}
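/*
 * A worked example, assuming the usual VDEV_LABELS == 4 and a 256 KiB
 * vdev_label_t: for an aligned device size 'size',
 *
 *	label_offset(size, 0) == 0
 *	label_offset(size, 1) == 256 KiB
 *	label_offset(size, 2) == size - 512 KiB
 *	label_offset(size, 3) == size - 256 KiB
 *
 * i.e. two labels at the front of the device and two at the back.
 */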
/*
* The same description applies as for zpool_read_label() below, except
* that here the labels are read without AIO. This path is used as a
* fallback when an AIO call fails in a way that plain synchronous reads
* may be able to avoid.
*/
static int
zpool_read_label_slow(int fd, nvlist_t **config, int *num_labels)
{
struct stat64 statbuf;
int l, count = 0;
vdev_phys_t *label;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
int error;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
error = posix_memalign((void **)&label, PAGESIZE, sizeof (*label));
if (error)
return (-1);
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
if (pread64(fd, label, sizeof (vdev_phys_t),
offset) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(label->vp_nvlist,
sizeof (label->vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
free(label);
*config = expected_config;
return (0);
}
/*
* Given a file descriptor, read the label information and return an nvlist
* describing the configuration, if there is one. The number of valid
* labels found will be returned in num_labels when non-NULL.
*/
int
zpool_read_label(int fd, nvlist_t **config, int *num_labels)
{
struct stat64 statbuf;
struct aiocb aiocbs[VDEV_LABELS];
struct aiocb *aiocbps[VDEV_LABELS];
vdev_phys_t *labels;
nvlist_t *expected_config = NULL;
uint64_t expected_guid = 0, size;
int error, l, count = 0;
*config = NULL;
if (fstat64_blk(fd, &statbuf) == -1)
return (0);
size = P2ALIGN_TYPED(statbuf.st_size, sizeof (vdev_label_t), uint64_t);
error = posix_memalign((void **)&labels, PAGESIZE,
VDEV_LABELS * sizeof (*labels));
if (error)
return (-1);
memset(aiocbs, 0, sizeof (aiocbs));
for (l = 0; l < VDEV_LABELS; l++) {
off_t offset = label_offset(size, l) + VDEV_SKIP_SIZE;
aiocbs[l].aio_fildes = fd;
aiocbs[l].aio_offset = offset;
aiocbs[l].aio_buf = &labels[l];
aiocbs[l].aio_nbytes = sizeof (vdev_phys_t);
aiocbs[l].aio_lio_opcode = LIO_READ;
aiocbps[l] = &aiocbs[l];
}
if (lio_listio(LIO_WAIT, aiocbps, VDEV_LABELS, NULL) != 0) {
int saved_errno = errno;
boolean_t do_slow = B_FALSE;
error = -1;
if (errno == EAGAIN || errno == EINTR || errno == EIO) {
/*
* A portion of the requests may have been submitted.
* Clean them up.
*/
for (l = 0; l < VDEV_LABELS; l++) {
errno = 0;
switch (aio_error(&aiocbs[l])) {
case EINVAL:
break;
case EINPROGRESS:
// This shouldn't be possible to
// encounter; die if we do.
ASSERT(B_FALSE);
+ fallthrough;
case EOPNOTSUPP:
case ENOSYS:
do_slow = B_TRUE;
- /* FALLTHROUGH */
+ fallthrough;
case 0:
default:
(void) aio_return(&aiocbs[l]);
}
}
}
if (do_slow) {
/*
* At least some of the I/O targeted files that are unsafe
* for AIO. Try again without AIO this time.
*/
error = zpool_read_label_slow(fd, config, num_labels);
saved_errno = errno;
}
free(labels);
errno = saved_errno;
return (error);
}
for (l = 0; l < VDEV_LABELS; l++) {
uint64_t state, guid, txg;
if (aio_return(&aiocbs[l]) != sizeof (vdev_phys_t))
continue;
if (nvlist_unpack(labels[l].vp_nvlist,
sizeof (labels[l].vp_nvlist), config, 0) != 0)
continue;
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_GUID,
&guid) != 0 || guid == 0) {
nvlist_free(*config);
continue;
}
if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 || state > POOL_STATE_L2CACHE) {
nvlist_free(*config);
continue;
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0 || txg == 0)) {
nvlist_free(*config);
continue;
}
if (expected_guid) {
if (expected_guid == guid)
count++;
nvlist_free(*config);
} else {
expected_config = *config;
expected_guid = guid;
count++;
}
}
if (num_labels != NULL)
*num_labels = count;
free(labels);
*config = expected_config;
return (0);
}
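/*
 * A typical call sequence (a sketch, not lifted from a specific caller;
 * the device path is hypothetical):
 *
 *	nvlist_t *config;
 *	int num_labels;
 *	int fd = open("/dev/sdb1", O_RDONLY | O_CLOEXEC);
 *	if (fd >= 0 && zpool_read_label(fd, &config, &num_labels) == 0 &&
 *	    config != NULL) {
 *		// inspect e.g. ZPOOL_CONFIG_POOL_NAME, then:
 *		nvlist_free(config);
 *	}
 *	if (fd >= 0)
 *		(void) close(fd);
 */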
/*
* Sorted by full path and then vdev guid to allow for multiple entries with
* the same full path name. This is required because it's possible to
* have multiple block devices with labels that refer to the same
* ZPOOL_CONFIG_PATH yet have different vdev guids. In this case both
* entries need to be added to the cache. Scenarios where this can occur
* include overwritten pool labels, devices which are visible from multiple
* hosts and multipath devices.
*/
int
slice_cache_compare(const void *arg1, const void *arg2)
{
const char *nm1 = ((rdsk_node_t *)arg1)->rn_name;
const char *nm2 = ((rdsk_node_t *)arg2)->rn_name;
uint64_t guid1 = ((rdsk_node_t *)arg1)->rn_vdev_guid;
uint64_t guid2 = ((rdsk_node_t *)arg2)->rn_vdev_guid;
int rv;
rv = TREE_ISIGN(strcmp(nm1, nm2));
if (rv)
return (rv);
return (TREE_CMP(guid1, guid2));
}
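/*
 * For example (hypothetical entries), the resulting ordering is
 * ("/dev/sda1", guid 0x1) < ("/dev/sda1", guid 0x2) < ("/dev/sdb1", guid 0x1):
 * ties on rn_name are broken by rn_vdev_guid, which is what lets two labels
 * sharing a ZPOOL_CONFIG_PATH coexist in the cache.
 */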
static int
label_paths_impl(libpc_handle_t *hdl, nvlist_t *nvroot, uint64_t pool_guid,
uint64_t vdev_guid, char **path, char **devid)
{
nvlist_t **child;
uint_t c, children;
uint64_t guid;
char *val;
int error;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
error = label_paths_impl(hdl, child[c],
pool_guid, vdev_guid, path, devid);
if (error)
return (error);
}
return (0);
}
if (nvroot == NULL)
return (0);
error = nvlist_lookup_uint64(nvroot, ZPOOL_CONFIG_GUID, &guid);
if ((error != 0) || (guid != vdev_guid))
return (0);
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_PATH, &val);
if (error == 0)
*path = val;
error = nvlist_lookup_string(nvroot, ZPOOL_CONFIG_DEVID, &val);
if (error == 0)
*devid = val;
return (0);
}
/*
* Given a disk label, fetch the ZPOOL_CONFIG_PATH and ZPOOL_CONFIG_DEVID
* and store these strings as config_path and devid_path respectively.
* The returned pointers are only valid as long as label remains valid.
*/
int
label_paths(libpc_handle_t *hdl, nvlist_t *label, char **path, char **devid)
{
nvlist_t *nvroot;
uint64_t pool_guid;
uint64_t vdev_guid;
*path = NULL;
*devid = NULL;
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvroot) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID, &pool_guid) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &vdev_guid))
return (ENOENT);
return (label_paths_impl(hdl, nvroot, pool_guid, vdev_guid, path,
devid));
}
static void
zpool_find_import_scan_add_slice(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *path, const char *name, int order)
{
avl_index_t where;
rdsk_node_t *slice;
slice = zutil_alloc(hdl, sizeof (rdsk_node_t));
if (asprintf(&slice->rn_name, "%s/%s", path, name) == -1) {
free(slice);
return;
}
slice->rn_vdev_guid = 0;
slice->rn_lock = lock;
slice->rn_avl = cache;
slice->rn_hdl = hdl;
slice->rn_order = order + IMPORT_ORDER_SCAN_OFFSET;
slice->rn_labelpaths = B_FALSE;
pthread_mutex_lock(lock);
if (avl_find(cache, slice, &where)) {
free(slice->rn_name);
free(slice);
} else {
avl_insert(cache, slice, where);
}
pthread_mutex_unlock(lock);
}
static int
zpool_find_import_scan_dir(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error;
char path[MAXPATHLEN];
struct dirent64 *dp;
DIR *dirp;
if (realpath(dir, path) == NULL) {
error = errno;
if (error == ENOENT)
return (0);
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir);
return (error);
}
dirp = opendir(path);
if (dirp == NULL) {
error = errno;
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
return (error);
}
while ((dp = readdir64(dirp)) != NULL) {
const char *name = dp->d_name;
if (strcmp(name, ".") == 0 || strcmp(name, "..") == 0)
continue;
switch (dp->d_type) {
case DT_UNKNOWN:
case DT_BLK:
case DT_LNK:
#ifdef __FreeBSD__
case DT_CHR:
#endif
case DT_REG:
break;
default:
continue;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name,
order);
}
(void) closedir(dirp);
return (0);
}
static int
zpool_find_import_scan_path(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t *cache, const char *dir, int order)
{
int error = 0;
char path[MAXPATHLEN];
char *d = NULL;
ssize_t dl;
const char *dpath, *name;
/*
* Separate the directory and the basename.
* We do this so that we can get the realpath of
* the directory. We don't get the realpath on the
* whole path because if it's a symlink, we want the
* path of the symlink, not where it points to.
*/
name = zfs_basename(dir);
if ((dl = zfs_dirnamelen(dir)) == -1)
dpath = ".";
else
dpath = d = zutil_strndup(hdl, dir, dl);
if (realpath(dpath, path) == NULL) {
error = errno;
if (error == ENOENT) {
error = 0;
goto out;
}
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir);
goto out;
}
zpool_find_import_scan_add_slice(hdl, lock, cache, path, name, order);
out:
free(d);
return (error);
}
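/*
 * As a concrete illustration of the split above (hypothetical path): for
 * dir == "/dev/disk/by-id/ata-DISK-part1", dpath becomes "/dev/disk/by-id"
 * and name "ata-DISK-part1"; realpath() is applied to dpath only, so a
 * by-id symlink is cached under its own name rather than under the device
 * node it points to.
 */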
/*
* Scan a list of directories for zfs devices.
*/
static int
zpool_find_import_scan(libpc_handle_t *hdl, pthread_mutex_t *lock,
avl_tree_t **slice_cache, const char * const *dir, size_t dirs)
{
avl_tree_t *cache;
rdsk_node_t *slice;
void *cookie;
int i, error;
*slice_cache = NULL;
cache = zutil_alloc(hdl, sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare, sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
for (i = 0; i < dirs; i++) {
struct stat sbuf;
if (stat(dir[i], &sbuf) != 0) {
error = errno;
if (error == ENOENT)
continue;
zutil_error_aux(hdl, "%s", strerror(error));
(void) zutil_error_fmt(hdl, EZFS_BADPATH, dgettext(
TEXT_DOMAIN, "cannot resolve path '%s'"), dir[i]);
goto error;
}
/*
* If dir[i] is a directory, we walk through it and add all
* the entries to the cache. If it's not a directory, we just
* add it to the cache.
*/
if (S_ISDIR(sbuf.st_mode)) {
if ((error = zpool_find_import_scan_dir(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
} else {
if ((error = zpool_find_import_scan_path(hdl, lock,
cache, dir[i], i)) != 0)
goto error;
}
}
*slice_cache = cache;
return (0);
error:
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
free(slice->rn_name);
free(slice);
}
free(cache);
return (error);
}
/*
* Given a list of directories to search, find all pools stored on disk. This
* includes partial pools which are not available to import. If no
* directories are given, the default search paths are used.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_impl(libpc_handle_t *hdl, importargs_t *iarg,
pthread_mutex_t *lock, avl_tree_t *cache)
{
nvlist_t *ret = NULL;
pool_list_t pools = { 0 };
pool_entry_t *pe, *penext;
vdev_entry_t *ve, *venext;
config_entry_t *ce, *cenext;
name_entry_t *ne, *nenext;
rdsk_node_t *slice;
void *cookie;
tpool_t *t;
verify(iarg->poolname == NULL || iarg->guid == 0);
/*
* Create a thread pool to parallelize the process of reading and
* validating labels; a large number of threads can be used because
* contention is minimal.
*/
t = tpool_create(1, 2 * sysconf(_SC_NPROCESSORS_ONLN), 0, NULL);
for (slice = avl_first(cache); slice;
(slice = avl_walk(cache, slice, AVL_AFTER)))
(void) tpool_dispatch(t, zpool_open_func, slice);
tpool_wait(t);
tpool_destroy(t);
/*
* Process the cache, filtering out any entries which are not
* for the specified pool, then adding matching label configs.
*/
cookie = NULL;
while ((slice = avl_destroy_nodes(cache, &cookie)) != NULL) {
if (slice->rn_config != NULL) {
nvlist_t *config = slice->rn_config;
boolean_t matched = B_TRUE;
boolean_t aux = B_FALSE;
int fd;
/*
* Check if it's a spare or l2cache device. If it is,
* we need to skip the name and guid check since they
* don't exist on an aux device label.
*/
if (iarg->poolname != NULL || iarg->guid != 0) {
uint64_t state;
aux = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_STATE, &state) == 0 &&
(state == POOL_STATE_SPARE ||
state == POOL_STATE_L2CACHE);
}
if (iarg->poolname != NULL && !aux) {
char *pname;
matched = nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &pname) == 0 &&
strcmp(iarg->poolname, pname) == 0;
} else if (iarg->guid != 0 && !aux) {
uint64_t this_guid;
matched = nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &this_guid) == 0 &&
iarg->guid == this_guid;
}
if (matched) {
/*
* Verify all remaining entries can be opened
* exclusively. This will prune all underlying
* multipath devices which otherwise could
* result in the vdev appearing as UNAVAIL.
*
* Under zdb, this step isn't required and
* would prevent a zdb -e of active pools with
* no cachefile.
*/
fd = open(slice->rn_name,
O_RDONLY | O_EXCL | O_CLOEXEC);
if (fd >= 0 || iarg->can_be_active) {
if (fd >= 0)
close(fd);
add_config(hdl, &pools,
slice->rn_name, slice->rn_order,
slice->rn_num_labels, config);
}
}
nvlist_free(config);
}
free(slice->rn_name);
free(slice);
}
avl_destroy(cache);
free(cache);
ret = get_configs(hdl, &pools, iarg->can_be_active, iarg->policy);
for (pe = pools.pools; pe != NULL; pe = penext) {
penext = pe->pe_next;
for (ve = pe->pe_vdevs; ve != NULL; ve = venext) {
venext = ve->ve_next;
for (ce = ve->ve_configs; ce != NULL; ce = cenext) {
cenext = ce->ce_next;
nvlist_free(ce->ce_config);
free(ce);
}
free(ve);
}
free(pe);
}
for (ne = pools.names; ne != NULL; ne = nenext) {
nenext = ne->ne_next;
free(ne->ne_name);
free(ne);
}
return (ret);
}
/*
* Given a config, discover the paths for the devices which
* exist in the config.
*/
static int
discover_cached_paths(libpc_handle_t *hdl, nvlist_t *nv,
avl_tree_t *cache, pthread_mutex_t *lock)
{
char *path = NULL;
ssize_t dl;
uint_t children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (int c = 0; c < children; c++) {
discover_cached_paths(hdl, child[c], cache, lock);
}
}
/*
* Once we have the path, we need to add the directory to
* our directory cache.
*/
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
if ((dl = zfs_dirnamelen(path)) == -1)
path = ".";
else
path[dl] = '\0';
return (zpool_find_import_scan_dir(hdl, lock, cache,
path, 0));
}
return (0);
}
/*
* Given a cache file, return the contents as a list of importable pools.
* poolname or guid (but not both) are provided by the caller when trying
* to import a specific pool.
*/
static nvlist_t *
zpool_find_import_cached(libpc_handle_t *hdl, importargs_t *iarg)
{
char *buf;
int fd;
struct stat64 statbuf;
nvlist_t *raw, *src, *dst;
nvlist_t *pools;
nvpair_t *elem;
char *name;
uint64_t this_guid;
boolean_t active;
verify(iarg->poolname == NULL || iarg->guid == 0);
if ((fd = open(iarg->cachefile, O_RDONLY | O_CLOEXEC)) < 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to open cache file"));
return (NULL);
}
if (fstat64(fd, &statbuf) != 0) {
zutil_error_aux(hdl, "%s", strerror(errno));
(void) close(fd);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN, "failed to get size of cache file"));
return (NULL);
}
if ((buf = zutil_alloc(hdl, statbuf.st_size)) == NULL) {
(void) close(fd);
return (NULL);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) close(fd);
free(buf);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"failed to read cache file contents"));
return (NULL);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &raw, 0) != 0) {
free(buf);
(void) zutil_error(hdl, EZFS_BADCACHE,
dgettext(TEXT_DOMAIN,
"invalid or corrupt cache file contents"));
return (NULL);
}
free(buf);
/*
* Go through and get the current state of the pools and refresh their
* state.
*/
if (nvlist_alloc(&pools, 0, 0) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(raw, elem)) != NULL) {
src = fnvpair_value_nvlist(elem);
name = fnvlist_lookup_string(src, ZPOOL_CONFIG_POOL_NAME);
if (iarg->poolname != NULL && strcmp(iarg->poolname, name) != 0)
continue;
this_guid = fnvlist_lookup_uint64(src, ZPOOL_CONFIG_POOL_GUID);
if (iarg->guid != 0 && iarg->guid != this_guid)
continue;
if (zutil_pool_active(hdl, name, this_guid, &active) != 0) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (active)
continue;
if (iarg->scan) {
uint64_t saved_guid = iarg->guid;
const char *saved_poolname = iarg->poolname;
pthread_mutex_t lock;
/*
* Create the device cache that will hold the
* devices we will scan based on the cachefile.
* This will get destroyed and freed by
* zpool_find_import_impl.
*/
avl_tree_t *cache = zutil_alloc(hdl,
sizeof (avl_tree_t));
avl_create(cache, slice_cache_compare,
sizeof (rdsk_node_t),
offsetof(rdsk_node_t, rn_node));
nvlist_t *nvroot = fnvlist_lookup_nvlist(src,
ZPOOL_CONFIG_VDEV_TREE);
/*
* We only want to find the pool with this_guid.
* We will reset these values back later.
*/
iarg->guid = this_guid;
iarg->poolname = NULL;
/*
* We need to build up a cache of devices that exist
* in the paths pointed to by the cachefile. This allows
* us to preserve the device namespace that was
* originally specified by the user but also lets us
* scan devices in those directories in case they had
* been renamed.
*/
pthread_mutex_init(&lock, NULL);
discover_cached_paths(hdl, nvroot, cache, &lock);
nvlist_t *nv = zpool_find_import_impl(hdl, iarg,
&lock, cache);
pthread_mutex_destroy(&lock);
/*
* zpool_find_import_impl will return
* a list of pools that it found based on the
* device cache. There should only be one pool
* since we're looking for a specific guid.
* We will use that pool to build up the final
* pool nvlist which is returned back to the
* caller.
*/
nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
fnvlist_add_nvlist(pools, nvpair_name(pair),
fnvpair_value_nvlist(pair));
VERIFY3P(nvlist_next_nvpair(nv, pair), ==, NULL);
iarg->guid = saved_guid;
iarg->poolname = saved_poolname;
continue;
}
if (nvlist_add_string(src, ZPOOL_CONFIG_CACHEFILE,
iarg->cachefile) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if ((dst = zutil_refresh_config(hdl, src)) == NULL) {
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
if (nvlist_add_nvlist(pools, nvpair_name(elem), dst) != 0) {
(void) zutil_no_memory(hdl);
nvlist_free(dst);
nvlist_free(raw);
nvlist_free(pools);
return (NULL);
}
nvlist_free(dst);
}
nvlist_free(raw);
return (pools);
}
static nvlist_t *
zpool_find_import(libpc_handle_t *hdl, importargs_t *iarg)
{
pthread_mutex_t lock;
avl_tree_t *cache;
nvlist_t *pools = NULL;
verify(iarg->poolname == NULL || iarg->guid == 0);
pthread_mutex_init(&lock, NULL);
/*
* Locate pool member vdevs by blkid or by directory scanning.
* On success, a newly allocated AVL tree populated with an entry for
* each discovered vdev is returned via the cache argument.
* It's the caller's responsibility to consume and destroy this tree.
*/
if (iarg->scan || iarg->paths != 0) {
size_t dirs = iarg->paths;
const char * const *dir = (const char * const *)iarg->path;
if (dirs == 0)
dir = zpool_default_search_paths(&dirs);
if (zpool_find_import_scan(hdl, &lock, &cache,
dir, dirs) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
} else {
if (zpool_find_import_blkid(hdl, &lock, &cache) != 0) {
pthread_mutex_destroy(&lock);
return (NULL);
}
}
pools = zpool_find_import_impl(hdl, iarg, &lock, cache);
pthread_mutex_destroy(&lock);
return (pools);
}
nvlist_t *
zpool_search_import(void *hdl, importargs_t *import,
const pool_config_ops_t *pco)
{
libpc_handle_t handle = { 0 };
nvlist_t *pools = NULL;
handle.lpc_lib_handle = hdl;
handle.lpc_ops = pco;
handle.lpc_printerr = B_TRUE;
verify(import->poolname == NULL || import->guid == 0);
if (import->cachefile != NULL)
pools = zpool_find_import_cached(&handle, import);
else
pools = zpool_find_import(&handle, import);
if ((pools == NULL || nvlist_empty(pools)) &&
handle.lpc_open_access_error && geteuid() != 0) {
(void) zutil_error(&handle, EZFS_EACESS, dgettext(TEXT_DOMAIN,
"no pools found"));
}
return (pools);
}
static boolean_t
pool_match(nvlist_t *cfg, char *tgt)
{
uint64_t v, guid = strtoull(tgt, NULL, 0);
char *s;
if (guid != 0) {
if (nvlist_lookup_uint64(cfg, ZPOOL_CONFIG_POOL_GUID, &v) == 0)
return (v == guid);
} else {
if (nvlist_lookup_string(cfg, ZPOOL_CONFIG_POOL_NAME, &s) == 0)
return (strcmp(s, tgt) == 0);
}
return (B_FALSE);
}
int
zpool_find_config(void *hdl, const char *target, nvlist_t **configp,
importargs_t *args, const pool_config_ops_t *pco)
{
nvlist_t *pools;
nvlist_t *match = NULL;
nvlist_t *config = NULL;
char *sepp = NULL;
int count = 0;
char *targetdup = strdup(target);
*configp = NULL;
if ((sepp = strpbrk(targetdup, "/@")) != NULL)
*sepp = '\0';
pools = zpool_search_import(hdl, args, pco);
if (pools != NULL) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
VERIFY0(nvpair_value_nvlist(elem, &config));
if (pool_match(config, targetdup)) {
count++;
if (match != NULL) {
/* multiple matches found */
continue;
} else {
match = fnvlist_dup(config);
}
}
}
fnvlist_free(pools);
}
if (count == 0) {
free(targetdup);
return (ENOENT);
}
if (count > 1) {
free(targetdup);
fnvlist_free(match);
return (EINVAL);
}
*configp = match;
free(targetdup);
return (0);
}
diff --git a/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c b/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
index 139b6920e1ec..9d303d022517 100644
--- a/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
+++ b/sys/contrib/openzfs/module/icp/core/kcf_prov_tabs.c
@@ -1,645 +1,645 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2008 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* This file is part of the core Kernel Cryptographic Framework.
* It implements the management of the table of providers. Entries are
* added and removed when cryptographic providers register with
* and unregister from the framework, respectively. The KCF scheduler
* and ioctl pseudo driver call this function to obtain the list
* of available providers.
*
* The provider table is indexed by crypto_provider_id_t. Each
* element of the table contains a pointer to a provider descriptor,
* or NULL if the entry is free.
*
* This file also implements helper functions to allocate and free
* provider descriptors.
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/sched_impl.h>
#include <sys/crypto/spi.h>
#define KCF_MAX_PROVIDERS 512 /* max number of providers */
/*
* Prov_tab is an array of providers which is updated when
* a crypto provider registers with kcf. The provider calls the
* SPI routine, crypto_register_provider(), which in turn calls
* kcf_prov_tab_add_provider().
*
* A provider unregisters by calling crypto_unregister_provider()
* which triggers the removal of the prov_tab entry.
* It also calls kcf_remove_mech_provider().
*
* prov_tab entries are not updated from kcf.conf or by cryptoadm(1M).
*/
static kcf_provider_desc_t **prov_tab = NULL;
static kmutex_t prov_tab_mutex; /* ensure exclusive access to the table */
static uint_t prov_tab_num = 0; /* number of providers in table */
static uint_t prov_tab_max = KCF_MAX_PROVIDERS;
void
kcf_prov_tab_destroy(void)
{
mutex_destroy(&prov_tab_mutex);
if (prov_tab)
kmem_free(prov_tab, prov_tab_max *
sizeof (kcf_provider_desc_t *));
}
/*
* Initialize a mutex and the KCF providers table, prov_tab.
* The providers table is dynamically allocated with prov_tab_max entries.
* Called from kcf module _init().
*/
void
kcf_prov_tab_init(void)
{
mutex_init(&prov_tab_mutex, NULL, MUTEX_DEFAULT, NULL);
prov_tab = kmem_zalloc(prov_tab_max * sizeof (kcf_provider_desc_t *),
KM_SLEEP);
}
/*
* Add a provider to the provider table. If no free entry can be found
* for the new provider, returns CRYPTO_HOST_MEMORY. Otherwise, adds
* the provider to the table, initializes the pd_prov_id field
* of the specified provider descriptor to the index in that table,
* and returns CRYPTO_SUCCESS. Note that a REFHOLD is done on the
* provider when pointed to by a table entry.
*/
int
kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
{
uint_t i;
ASSERT(prov_tab != NULL);
mutex_enter(&prov_tab_mutex);
/* find free slot in providers table */
for (i = 1; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
;
if (i == KCF_MAX_PROVIDERS) {
/* ran out of providers entries */
mutex_exit(&prov_tab_mutex);
cmn_err(CE_WARN, "out of providers entries");
return (CRYPTO_HOST_MEMORY);
}
/* initialize entry */
prov_tab[i] = prov_desc;
KCF_PROV_REFHOLD(prov_desc);
KCF_PROV_IREFHOLD(prov_desc);
prov_tab_num++;
mutex_exit(&prov_tab_mutex);
/* update provider descriptor */
prov_desc->pd_prov_id = i;
/*
* The KCF-private provider handle is defined as the internal
* provider id.
*/
prov_desc->pd_kcf_prov_handle =
(crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;
return (CRYPTO_SUCCESS);
}
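/*
 * A small illustration of the bookkeeping above: the slot search starts at
 * index 1, so prov_tab[0] is never handed out; the first three providers to
 * register are assigned pd_prov_id 1, 2 and 3, and prov_tab[pd_prov_id]
 * points back at each descriptor until that descriptor is eventually freed.
 */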
/*
* Remove the provider specified by its id. A REFRELE is done on the
* corresponding provider descriptor before this function returns.
* Returns CRYPTO_UNKNOWN_PROVIDER if the provider id is not valid.
*/
int
kcf_prov_tab_rem_provider(crypto_provider_id_t prov_id)
{
kcf_provider_desc_t *prov_desc;
ASSERT(prov_tab != NULL);
ASSERT(prov_tab_num >= 0);
/*
* Validate provider id, since it can be specified by a 3rd-party
* provider.
*/
mutex_enter(&prov_tab_mutex);
if (prov_id >= KCF_MAX_PROVIDERS ||
((prov_desc = prov_tab[prov_id]) == NULL)) {
mutex_exit(&prov_tab_mutex);
return (CRYPTO_INVALID_PROVIDER_ID);
}
mutex_exit(&prov_tab_mutex);
/*
* The provider id must remain valid until the associated provider
* descriptor is freed. For this reason, we simply release our
* reference to the descriptor here. When the reference count
* reaches zero, kcf_free_provider_desc() will be invoked and
* the associated entry in the providers table will be released
* at that time.
*/
KCF_PROV_REFRELE(prov_desc);
KCF_PROV_IREFRELE(prov_desc);
return (CRYPTO_SUCCESS);
}
/*
* Returns the provider descriptor corresponding to the specified
* provider id. A REFHOLD is done on the descriptor before it is
* returned to the caller. It is the responsibility of the caller
* to do a REFRELE once it is done with the provider descriptor.
*/
kcf_provider_desc_t *
kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
{
kcf_provider_desc_t *prov_desc;
mutex_enter(&prov_tab_mutex);
prov_desc = prov_tab[prov_id];
if (prov_desc == NULL) {
mutex_exit(&prov_tab_mutex);
return (NULL);
}
KCF_PROV_REFHOLD(prov_desc);
mutex_exit(&prov_tab_mutex);
return (prov_desc);
}
static void
allocate_ops_v1(crypto_ops_t *src, crypto_ops_t *dst, uint_t *mech_list_count)
{
if (src->co_control_ops != NULL)
dst->co_control_ops = kmem_alloc(sizeof (crypto_control_ops_t),
KM_SLEEP);
if (src->co_digest_ops != NULL)
dst->co_digest_ops = kmem_alloc(sizeof (crypto_digest_ops_t),
KM_SLEEP);
if (src->co_cipher_ops != NULL)
dst->co_cipher_ops = kmem_alloc(sizeof (crypto_cipher_ops_t),
KM_SLEEP);
if (src->co_mac_ops != NULL)
dst->co_mac_ops = kmem_alloc(sizeof (crypto_mac_ops_t),
KM_SLEEP);
if (src->co_sign_ops != NULL)
dst->co_sign_ops = kmem_alloc(sizeof (crypto_sign_ops_t),
KM_SLEEP);
if (src->co_verify_ops != NULL)
dst->co_verify_ops = kmem_alloc(sizeof (crypto_verify_ops_t),
KM_SLEEP);
if (src->co_dual_ops != NULL)
dst->co_dual_ops = kmem_alloc(sizeof (crypto_dual_ops_t),
KM_SLEEP);
if (src->co_dual_cipher_mac_ops != NULL)
dst->co_dual_cipher_mac_ops = kmem_alloc(
sizeof (crypto_dual_cipher_mac_ops_t), KM_SLEEP);
if (src->co_random_ops != NULL) {
dst->co_random_ops = kmem_alloc(
sizeof (crypto_random_number_ops_t), KM_SLEEP);
/*
* Allocate storage to store the array of supported mechanisms
* specified by provider. We allocate extra mechanism storage
* if the provider has random_ops since we keep an internal
* mechanism, SUN_RANDOM, in this case.
*/
(*mech_list_count)++;
}
if (src->co_session_ops != NULL)
dst->co_session_ops = kmem_alloc(sizeof (crypto_session_ops_t),
KM_SLEEP);
if (src->co_object_ops != NULL)
dst->co_object_ops = kmem_alloc(sizeof (crypto_object_ops_t),
KM_SLEEP);
if (src->co_key_ops != NULL)
dst->co_key_ops = kmem_alloc(sizeof (crypto_key_ops_t),
KM_SLEEP);
if (src->co_provider_ops != NULL)
dst->co_provider_ops = kmem_alloc(
sizeof (crypto_provider_management_ops_t), KM_SLEEP);
if (src->co_ctx_ops != NULL)
dst->co_ctx_ops = kmem_alloc(sizeof (crypto_ctx_ops_t),
KM_SLEEP);
}
static void
allocate_ops_v2(crypto_ops_t *src, crypto_ops_t *dst)
{
if (src->co_mech_ops != NULL)
dst->co_mech_ops = kmem_alloc(sizeof (crypto_mech_ops_t),
KM_SLEEP);
}
static void
allocate_ops_v3(crypto_ops_t *src, crypto_ops_t *dst)
{
if (src->co_nostore_key_ops != NULL)
dst->co_nostore_key_ops =
kmem_alloc(sizeof (crypto_nostore_key_ops_t), KM_SLEEP);
}
/*
* Allocate a provider descriptor. mech_list_count specifies the
* number of mechanisms supported by the providers, and is used
* to allocate storage for the mechanism table.
* This function may sleep while allocating memory, which is OK
* since it is invoked from user context during provider registration.
*/
kcf_provider_desc_t *
kcf_alloc_provider_desc(crypto_provider_info_t *info)
{
int i, j;
kcf_provider_desc_t *desc;
uint_t mech_list_count = info->pi_mech_list_count;
crypto_ops_t *src_ops = info->pi_ops_vector;
desc = kmem_zalloc(sizeof (kcf_provider_desc_t), KM_SLEEP);
/*
* pd_description serves two purposes
* - Appears as a blank padded PKCS#11 style string, that will be
* returned to applications in CK_SLOT_INFO.slotDescription.
* This means that we should not have a null character in the
* first CRYPTO_PROVIDER_DESCR_MAX_LEN bytes.
* - Appears as a null-terminated string that can be used by
* other kcf routines.
*
* So, we allocate enough room for one extra null terminator
* which keeps everyone happy.
*/
desc->pd_description = kmem_alloc(CRYPTO_PROVIDER_DESCR_MAX_LEN + 1,
KM_SLEEP);
(void) memset(desc->pd_description, ' ',
CRYPTO_PROVIDER_DESCR_MAX_LEN);
desc->pd_description[CRYPTO_PROVIDER_DESCR_MAX_LEN] = '\0';
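/*
 * For example, once a provider's description (e.g. the AES provider's
 * "AES Kernel SW Provider") is copied in, it sits in this buffer blank-padded
 * to CRYPTO_PROVIDER_DESCR_MAX_LEN characters with one trailing '\0', so the
 * same storage serves both as a PKCS#11 slotDescription and as an ordinary
 * C string.
 */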
/*
* Since the framework does not require the ops vector specified
* by the providers during registration to be persistent,
* KCF needs to allocate storage into which the ops
* vectors are copied.
*/
desc->pd_ops_vector = kmem_zalloc(sizeof (crypto_ops_t), KM_SLEEP);
if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
allocate_ops_v1(src_ops, desc->pd_ops_vector, &mech_list_count);
if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2)
allocate_ops_v2(src_ops, desc->pd_ops_vector);
if (info->pi_interface_version == CRYPTO_SPI_VERSION_3)
allocate_ops_v3(src_ops, desc->pd_ops_vector);
}
desc->pd_mech_list_count = mech_list_count;
desc->pd_mechanisms = kmem_zalloc(sizeof (crypto_mech_info_t) *
mech_list_count, KM_SLEEP);
for (i = 0; i < KCF_OPS_CLASSSIZE; i++)
for (j = 0; j < KCF_MAXMECHTAB; j++)
desc->pd_mech_indx[i][j] = KCF_INVALID_INDX;
desc->pd_prov_id = KCF_PROVID_INVALID;
desc->pd_state = KCF_PROV_ALLOCATED;
mutex_init(&desc->pd_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&desc->pd_resume_cv, NULL, CV_DEFAULT, NULL);
cv_init(&desc->pd_remove_cv, NULL, CV_DEFAULT, NULL);
return (desc);
}
/*
* Called by KCF_PROV_REFRELE when a provider's reference count drops
* to zero. We free the descriptor when the last reference is released.
* However, for software providers, we do not free it when there is an
* unregister thread waiting. We signal that thread in this case and
* that thread is responsible for freeing the descriptor.
*/
void
kcf_provider_zero_refcnt(kcf_provider_desc_t *desc)
{
mutex_enter(&desc->pd_lock);
switch (desc->pd_prov_type) {
case CRYPTO_SW_PROVIDER:
if (desc->pd_state == KCF_PROV_REMOVED ||
desc->pd_state == KCF_PROV_DISABLED) {
desc->pd_state = KCF_PROV_FREED;
cv_broadcast(&desc->pd_remove_cv);
mutex_exit(&desc->pd_lock);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case CRYPTO_HW_PROVIDER:
case CRYPTO_LOGICAL_PROVIDER:
mutex_exit(&desc->pd_lock);
kcf_free_provider_desc(desc);
}
}
/*
* Free a provider descriptor.
*/
void
kcf_free_provider_desc(kcf_provider_desc_t *desc)
{
if (desc == NULL)
return;
mutex_enter(&prov_tab_mutex);
if (desc->pd_prov_id != KCF_PROVID_INVALID) {
/* release the associated providers table entry */
ASSERT(prov_tab[desc->pd_prov_id] != NULL);
prov_tab[desc->pd_prov_id] = NULL;
prov_tab_num--;
}
mutex_exit(&prov_tab_mutex);
/* free the kernel memory associated with the provider descriptor */
if (desc->pd_description != NULL)
kmem_free(desc->pd_description,
CRYPTO_PROVIDER_DESCR_MAX_LEN + 1);
if (desc->pd_ops_vector != NULL) {
if (desc->pd_ops_vector->co_control_ops != NULL)
kmem_free(desc->pd_ops_vector->co_control_ops,
sizeof (crypto_control_ops_t));
if (desc->pd_ops_vector->co_digest_ops != NULL)
kmem_free(desc->pd_ops_vector->co_digest_ops,
sizeof (crypto_digest_ops_t));
if (desc->pd_ops_vector->co_cipher_ops != NULL)
kmem_free(desc->pd_ops_vector->co_cipher_ops,
sizeof (crypto_cipher_ops_t));
if (desc->pd_ops_vector->co_mac_ops != NULL)
kmem_free(desc->pd_ops_vector->co_mac_ops,
sizeof (crypto_mac_ops_t));
if (desc->pd_ops_vector->co_sign_ops != NULL)
kmem_free(desc->pd_ops_vector->co_sign_ops,
sizeof (crypto_sign_ops_t));
if (desc->pd_ops_vector->co_verify_ops != NULL)
kmem_free(desc->pd_ops_vector->co_verify_ops,
sizeof (crypto_verify_ops_t));
if (desc->pd_ops_vector->co_dual_ops != NULL)
kmem_free(desc->pd_ops_vector->co_dual_ops,
sizeof (crypto_dual_ops_t));
if (desc->pd_ops_vector->co_dual_cipher_mac_ops != NULL)
kmem_free(desc->pd_ops_vector->co_dual_cipher_mac_ops,
sizeof (crypto_dual_cipher_mac_ops_t));
if (desc->pd_ops_vector->co_random_ops != NULL)
kmem_free(desc->pd_ops_vector->co_random_ops,
sizeof (crypto_random_number_ops_t));
if (desc->pd_ops_vector->co_session_ops != NULL)
kmem_free(desc->pd_ops_vector->co_session_ops,
sizeof (crypto_session_ops_t));
if (desc->pd_ops_vector->co_object_ops != NULL)
kmem_free(desc->pd_ops_vector->co_object_ops,
sizeof (crypto_object_ops_t));
if (desc->pd_ops_vector->co_key_ops != NULL)
kmem_free(desc->pd_ops_vector->co_key_ops,
sizeof (crypto_key_ops_t));
if (desc->pd_ops_vector->co_provider_ops != NULL)
kmem_free(desc->pd_ops_vector->co_provider_ops,
sizeof (crypto_provider_management_ops_t));
if (desc->pd_ops_vector->co_ctx_ops != NULL)
kmem_free(desc->pd_ops_vector->co_ctx_ops,
sizeof (crypto_ctx_ops_t));
if (desc->pd_ops_vector->co_mech_ops != NULL)
kmem_free(desc->pd_ops_vector->co_mech_ops,
sizeof (crypto_mech_ops_t));
if (desc->pd_ops_vector->co_nostore_key_ops != NULL)
kmem_free(desc->pd_ops_vector->co_nostore_key_ops,
sizeof (crypto_nostore_key_ops_t));
kmem_free(desc->pd_ops_vector, sizeof (crypto_ops_t));
}
if (desc->pd_mechanisms != NULL)
/* free the memory associated with the mechanism infos */
kmem_free(desc->pd_mechanisms, sizeof (crypto_mech_info_t) *
desc->pd_mech_list_count);
if (desc->pd_sched_info.ks_taskq != NULL)
taskq_destroy(desc->pd_sched_info.ks_taskq);
mutex_destroy(&desc->pd_lock);
cv_destroy(&desc->pd_resume_cv);
cv_destroy(&desc->pd_remove_cv);
kmem_free(desc, sizeof (kcf_provider_desc_t));
}
/*
* Returns an array of hardware and logical provider descriptors,
* a.k.a. the PKCS#11 slot list. A REFHOLD is done on each descriptor
* before the array is returned. The entire table can be freed by
* calling kcf_free_provider_tab().
*/
int
kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
boolean_t unverified)
{
kcf_provider_desc_t *prov_desc;
kcf_provider_desc_t **p = NULL;
char *last;
uint_t cnt = 0;
uint_t i, j;
int rval = CRYPTO_SUCCESS;
size_t n, final_size;
/* count the providers */
mutex_enter(&prov_tab_mutex);
for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
(prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
cnt++;
}
}
}
mutex_exit(&prov_tab_mutex);
if (cnt == 0)
goto out;
n = cnt * sizeof (kcf_provider_desc_t *);
again:
p = kmem_zalloc(n, KM_SLEEP);
/* pointer to last entry in the array */
last = (char *)&p[cnt-1];
mutex_enter(&prov_tab_mutex);
/* fill the slot list */
for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
if ((prov_desc = prov_tab[i]) != NULL &&
((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
(prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
if (KCF_IS_PROV_USABLE(prov_desc) ||
(unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
if ((char *)&p[j] > last) {
mutex_exit(&prov_tab_mutex);
kcf_free_provider_tab(cnt, p);
n = n << 1;
cnt = cnt << 1;
goto again;
}
p[j++] = prov_desc;
KCF_PROV_REFHOLD(prov_desc);
}
}
}
mutex_exit(&prov_tab_mutex);
final_size = j * sizeof (kcf_provider_desc_t *);
cnt = j;
ASSERT(final_size <= n);
/* check if buffer we allocated is too large */
if (final_size < n) {
char *final_buffer = NULL;
if (final_size > 0) {
final_buffer = kmem_alloc(final_size, KM_SLEEP);
bcopy(p, final_buffer, final_size);
}
kmem_free(p, n);
p = (kcf_provider_desc_t **)final_buffer;
}
out:
*count = cnt;
*array = p;
return (rval);
}
/*
* Free an array of hardware provider descriptors. A REFRELE
* is done on each descriptor before the table is freed.
*/
void
kcf_free_provider_tab(uint_t count, kcf_provider_desc_t **array)
{
kcf_provider_desc_t *prov_desc;
int i;
for (i = 0; i < count; i++) {
if ((prov_desc = array[i]) != NULL) {
KCF_PROV_REFRELE(prov_desc);
}
}
kmem_free(array, count * sizeof (kcf_provider_desc_t *));
}
/*
* Returns in the location pointed to by pd a pointer to the descriptor
* for the software provider for the specified mechanism.
* The provider descriptor is returned held and it is the caller's
* responsibility to release it when done. The mechanism entry
* is returned if the optional argument mep is non-NULL.
*
* Returns one of the CRYPTO_ * error codes on failure, and
* CRYPTO_SUCCESS on success.
*/
int
kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
kcf_mech_entry_t **mep, boolean_t log_warn)
{
kcf_mech_entry_t *me;
/* get the mechanism entry for this mechanism */
if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
return (CRYPTO_MECHANISM_INVALID);
/*
* Get the software provider for this mechanism.
* Lock the mech_entry until we grab the 'pd'.
*/
mutex_enter(&me->me_mutex);
if (me->me_sw_prov == NULL ||
(*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
/* no SW provider for this mechanism */
if (log_warn)
cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
me->me_name);
mutex_exit(&me->me_mutex);
return (CRYPTO_MECH_NOT_SUPPORTED);
}
KCF_PROV_REFHOLD(*pd);
mutex_exit(&me->me_mutex);
if (mep != NULL)
*mep = me;
return (CRYPTO_SUCCESS);
}
diff --git a/sys/contrib/openzfs/module/icp/io/aes.c b/sys/contrib/openzfs/module/icp/io/aes.c
index f77583360235..c47c7567b900 100644
--- a/sys/contrib/openzfs/module/icp/io/aes.c
+++ b/sys/contrib/openzfs/module/icp/io/aes.c
@@ -1,1457 +1,1457 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
*/
/*
* AES provider for the Kernel Cryptographic Framework (KCF)
*/
#include <sys/zfs_context.h>
#include <sys/crypto/common.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/icp.h>
#include <modes/modes.h>
#include <sys/modctl.h>
#define _AES_IMPL
#include <aes/aes_impl.h>
#include <modes/gcm_impl.h>
#define CRYPTO_PROVIDER_NAME "aes"
extern struct mod_ops mod_cryptoops;
/*
* Module linkage information for the kernel.
*/
static struct modlcrypto modlcrypto = {
&mod_cryptoops,
"AES Kernel SW Provider"
};
static struct modlinkage modlinkage = {
MODREV_1, { (void *)&modlcrypto, NULL }
};
/*
* Mechanism info structure passed to KCF during registration.
*/
static crypto_mech_info_t aes_mech_info_tab[] = {
/* AES_ECB */
{SUN_CKM_AES_ECB, AES_ECB_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_CBC */
{SUN_CKM_AES_CBC, AES_CBC_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_CTR */
{SUN_CKM_AES_CTR, AES_CTR_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_CCM */
{SUN_CKM_AES_CCM, AES_CCM_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_GCM */
{SUN_CKM_AES_GCM, AES_GCM_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES},
/* AES_GMAC */
{SUN_CKM_AES_GMAC, AES_GMAC_MECH_INFO_TYPE,
CRYPTO_FG_ENCRYPT | CRYPTO_FG_ENCRYPT_ATOMIC |
CRYPTO_FG_DECRYPT | CRYPTO_FG_DECRYPT_ATOMIC |
CRYPTO_FG_MAC | CRYPTO_FG_MAC_ATOMIC |
CRYPTO_FG_SIGN | CRYPTO_FG_SIGN_ATOMIC |
CRYPTO_FG_VERIFY | CRYPTO_FG_VERIFY_ATOMIC,
AES_MIN_KEY_BYTES, AES_MAX_KEY_BYTES, CRYPTO_KEYSIZE_UNIT_IN_BYTES}
};
static void aes_provider_status(crypto_provider_handle_t, uint_t *);
static crypto_control_ops_t aes_control_ops = {
aes_provider_status
};
static int aes_encrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_decrypt_init(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_common_init(crypto_ctx_t *, crypto_mechanism_t *,
crypto_key_t *, crypto_spi_ctx_template_t, crypto_req_handle_t, boolean_t);
static int aes_common_init_ctx(aes_ctx_t *, crypto_spi_ctx_template_t *,
crypto_mechanism_t *, crypto_key_t *, int, boolean_t);
static int aes_encrypt_final(crypto_ctx_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_decrypt_final(crypto_ctx_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_encrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_encrypt_update(crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
static int aes_encrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_decrypt(crypto_ctx_t *, crypto_data_t *, crypto_data_t *,
crypto_req_handle_t);
static int aes_decrypt_update(crypto_ctx_t *, crypto_data_t *,
crypto_data_t *, crypto_req_handle_t);
static int aes_decrypt_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *,
crypto_data_t *, crypto_spi_ctx_template_t, crypto_req_handle_t);
static crypto_cipher_ops_t aes_cipher_ops = {
.encrypt_init = aes_encrypt_init,
.encrypt = aes_encrypt,
.encrypt_update = aes_encrypt_update,
.encrypt_final = aes_encrypt_final,
.encrypt_atomic = aes_encrypt_atomic,
.decrypt_init = aes_decrypt_init,
.decrypt = aes_decrypt,
.decrypt_update = aes_decrypt_update,
.decrypt_final = aes_decrypt_final,
.decrypt_atomic = aes_decrypt_atomic
};
static int aes_mac_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
crypto_spi_ctx_template_t, crypto_req_handle_t);
static int aes_mac_verify_atomic(crypto_provider_handle_t, crypto_session_id_t,
crypto_mechanism_t *, crypto_key_t *, crypto_data_t *, crypto_data_t *,
crypto_spi_ctx_template_t, crypto_req_handle_t);
static crypto_mac_ops_t aes_mac_ops = {
.mac_init = NULL,
.mac = NULL,
.mac_update = NULL,
.mac_final = NULL,
.mac_atomic = aes_mac_atomic,
.mac_verify_atomic = aes_mac_verify_atomic
};
static int aes_create_ctx_template(crypto_provider_handle_t,
crypto_mechanism_t *, crypto_key_t *, crypto_spi_ctx_template_t *,
size_t *, crypto_req_handle_t);
static int aes_free_context(crypto_ctx_t *);
static crypto_ctx_ops_t aes_ctx_ops = {
.create_ctx_template = aes_create_ctx_template,
.free_context = aes_free_context
};
static crypto_ops_t aes_crypto_ops = {{{{{
&aes_control_ops,
NULL,
&aes_cipher_ops,
&aes_mac_ops,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
NULL,
&aes_ctx_ops
}}}}};
static crypto_provider_info_t aes_prov_info = {{{{
CRYPTO_SPI_VERSION_1,
"AES Software Provider",
CRYPTO_SW_PROVIDER,
NULL,
&aes_crypto_ops,
sizeof (aes_mech_info_tab)/sizeof (crypto_mech_info_t),
aes_mech_info_tab
}}}};
static crypto_kcf_provider_handle_t aes_prov_handle = 0;
static crypto_data_t null_crypto_data = { CRYPTO_DATA_RAW };
int
aes_mod_init(void)
{
int ret;
/* Determine the fastest available implementation. */
aes_impl_init();
gcm_impl_init();
if ((ret = mod_install(&modlinkage)) != 0)
return (ret);
/* Register with KCF. If the registration fails, remove the module. */
if (crypto_register_provider(&aes_prov_info, &aes_prov_handle)) {
(void) mod_remove(&modlinkage);
return (EACCES);
}
return (0);
}
int
aes_mod_fini(void)
{
/* Unregister from KCF if module is registered */
if (aes_prov_handle != 0) {
if (crypto_unregister_provider(aes_prov_handle))
return (EBUSY);
aes_prov_handle = 0;
}
return (mod_remove(&modlinkage));
}
static int
aes_check_mech_param(crypto_mechanism_t *mechanism, aes_ctx_t **ctx, int kmflag)
{
void *p = NULL;
boolean_t param_required = B_TRUE;
size_t param_len;
void *(*alloc_fun)(int);
int rv = CRYPTO_SUCCESS;
switch (mechanism->cm_type) {
case AES_ECB_MECH_INFO_TYPE:
param_required = B_FALSE;
alloc_fun = ecb_alloc_ctx;
break;
case AES_CBC_MECH_INFO_TYPE:
param_len = AES_BLOCK_LEN;
alloc_fun = cbc_alloc_ctx;
break;
case AES_CTR_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_CTR_PARAMS);
alloc_fun = ctr_alloc_ctx;
break;
case AES_CCM_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_CCM_PARAMS);
alloc_fun = ccm_alloc_ctx;
break;
case AES_GCM_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_GCM_PARAMS);
alloc_fun = gcm_alloc_ctx;
break;
case AES_GMAC_MECH_INFO_TYPE:
param_len = sizeof (CK_AES_GMAC_PARAMS);
alloc_fun = gmac_alloc_ctx;
break;
default:
rv = CRYPTO_MECHANISM_INVALID;
return (rv);
}
if (param_required && mechanism->cm_param != NULL &&
mechanism->cm_param_len != param_len) {
rv = CRYPTO_MECHANISM_PARAM_INVALID;
}
if (ctx != NULL) {
p = (alloc_fun)(kmflag);
*ctx = p;
}
return (rv);
}
/*
* Initialize key schedules for AES
*/
static int
init_keysched(crypto_key_t *key, void *newbie)
{
/*
* Only keys by value are supported by this module.
*/
switch (key->ck_format) {
case CRYPTO_KEY_RAW:
if (key->ck_length < AES_MINBITS ||
key->ck_length > AES_MAXBITS) {
return (CRYPTO_KEY_SIZE_RANGE);
}
/* key length must be either 128, 192, or 256 bits */
if ((key->ck_length & 63) != 0)
return (CRYPTO_KEY_SIZE_RANGE);
break;
default:
return (CRYPTO_KEY_TYPE_INCONSISTENT);
}
aes_init_keysched(key->ck_data, key->ck_length, newbie);
return (CRYPTO_SUCCESS);
}
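/*
 * A standalone worked version of the validation above: the key length (in
 * bits) must fall within the AES range and be a multiple of 64, which
 * together admit only 128, 192, and 256. The bounds are written out
 * literally here for illustration; the module uses AES_MINBITS/AES_MAXBITS.
 */
#include <stdio.h>

static int
aes_key_bits_ok(unsigned int bits)
{
	return (bits >= 128 && bits <= 256 && (bits & 63) == 0);
}

int
main(void)
{
	unsigned int tests[] = { 64, 128, 160, 192, 256, 320 };

	for (size_t i = 0; i < sizeof (tests) / sizeof (tests[0]); i++)
		printf("%u bits: %s\n", tests[i],
		    aes_key_bits_ok(tests[i]) ? "accepted" : "rejected");
	return (0);
}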
/*
* KCF software provider control entry points.
*/
/* ARGSUSED */
static void
aes_provider_status(crypto_provider_handle_t provider, uint_t *status)
{
*status = CRYPTO_PROVIDER_READY;
}
static int
aes_encrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
crypto_req_handle_t req)
{
return (aes_common_init(ctx, mechanism, key, template, req, B_TRUE));
}
static int
aes_decrypt_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
crypto_req_handle_t req)
{
return (aes_common_init(ctx, mechanism, key, template, req, B_FALSE));
}
/*
* KCF software provider encrypt entry points.
*/
static int
aes_common_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_spi_ctx_template_t template,
crypto_req_handle_t req, boolean_t is_encrypt_init)
{
aes_ctx_t *aes_ctx;
int rv;
int kmflag;
/*
* Only keys by value are supported by this module.
*/
if (key->ck_format != CRYPTO_KEY_RAW) {
return (CRYPTO_KEY_TYPE_INCONSISTENT);
}
kmflag = crypto_kmflag(req);
if ((rv = aes_check_mech_param(mechanism, &aes_ctx, kmflag))
!= CRYPTO_SUCCESS)
return (rv);
rv = aes_common_init_ctx(aes_ctx, template, mechanism, key, kmflag,
is_encrypt_init);
if (rv != CRYPTO_SUCCESS) {
crypto_free_mode_ctx(aes_ctx);
return (rv);
}
ctx->cc_provider_private = aes_ctx;
return (CRYPTO_SUCCESS);
}
static void
aes_copy_block64(uint8_t *in, uint64_t *out)
{
if (IS_P2ALIGNED(in, sizeof (uint64_t))) {
/* LINTED: pointer alignment */
out[0] = *(uint64_t *)&in[0];
/* LINTED: pointer alignment */
out[1] = *(uint64_t *)&in[8];
} else {
uint8_t *iv8 = (uint8_t *)&out[0];
AES_COPY_BLOCK(in, iv8);
}
}
static int
aes_encrypt(crypto_ctx_t *ctx, crypto_data_t *plaintext,
crypto_data_t *ciphertext, crypto_req_handle_t req)
{
int ret = CRYPTO_FAILED;
aes_ctx_t *aes_ctx;
size_t saved_length, saved_offset, length_needed;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
/*
* For block ciphers, the plaintext length must be a multiple of the AES block size.
* This test is only valid for ciphers whose blocksize is a power of 2.
*/
if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
== 0) && (plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_DATA_LEN_RANGE);
ASSERT(ciphertext != NULL);
/*
* We need only return the length needed to store the output.
* We should not destroy the context in that case.
*/
switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
case CCM_MODE:
length_needed = plaintext->cd_length + aes_ctx->ac_mac_len;
break;
case GCM_MODE:
length_needed = plaintext->cd_length + aes_ctx->ac_tag_len;
break;
case GMAC_MODE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
length_needed = aes_ctx->ac_tag_len;
break;
default:
length_needed = plaintext->cd_length;
}
if (ciphertext->cd_length < length_needed) {
ciphertext->cd_length = length_needed;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_length = ciphertext->cd_length;
saved_offset = ciphertext->cd_offset;
/*
* Do an update on the specified input data.
*/
ret = aes_encrypt_update(ctx, plaintext, ciphertext, req);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
/*
* For CCM mode, aes_ccm_encrypt_final() will take care of any
* left-over unprocessed data, and compute the MAC
*/
if (aes_ctx->ac_flags & CCM_MODE) {
/*
* ccm_encrypt_final() will compute the MAC and append it to the
* existing ciphertext, so the leftover length value needs to be
* adjusted accordingly.
*/
/* order of following 2 lines MUST not be reversed */
ciphertext->cd_offset = ciphertext->cd_length;
ciphertext->cd_length = saved_length - ciphertext->cd_length;
ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, ciphertext,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
if (plaintext != ciphertext) {
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
}
ciphertext->cd_offset = saved_offset;
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
/*
* gcm_encrypt_final() will compute the MAC and append it to the
* existing ciphertext, so the leftover length value needs to be
* adjusted accordingly.
*/
/* order of following 2 lines MUST not be reversed */
ciphertext->cd_offset = ciphertext->cd_length;
ciphertext->cd_length = saved_length - ciphertext->cd_length;
ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, ciphertext,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
if (plaintext != ciphertext) {
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
}
ciphertext->cd_offset = saved_offset;
}
ASSERT(aes_ctx->ac_remainder_len == 0);
(void) aes_free_context(ctx);
return (ret);
}
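/*
 * Illustrative arithmetic only: the ciphertext buffer size aes_encrypt()
 * asks for in each authenticated mode, using hypothetical input and
 * MAC/tag lengths (16 bytes is a typical tag size, but the real values
 * come from the mechanism parameters at init time).
 */
#include <stdio.h>

int
main(void)
{
	size_t pt_len = 100;	/* plaintext bytes (hypothetical) */
	size_t mac_len = 16;	/* CCM MAC length (hypothetical) */
	size_t tag_len = 16;	/* GCM/GMAC tag length (hypothetical) */

	printf("CCM : %zu\n", pt_len + mac_len);	/* ciphertext + MAC */
	printf("GCM : %zu\n", pt_len + tag_len);	/* ciphertext + tag */
	printf("GMAC: %zu\n", tag_len);			/* tag only, no data */
	printf("else: %zu\n", pt_len);			/* plain block/stream modes */
	return (0);
}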
static int
aes_decrypt(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
crypto_data_t *plaintext, crypto_req_handle_t req)
{
int ret = CRYPTO_FAILED;
aes_ctx_t *aes_ctx;
off_t saved_offset;
size_t saved_length, length_needed;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
/*
* For block ciphers, the ciphertext length must be a multiple of the AES block size.
* This test is only valid for ciphers whose blocksize is a power of 2.
*/
if (((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE))
== 0) && (ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0) {
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
}
ASSERT(plaintext != NULL);
/*
* Return length needed to store the output.
* Do not destroy context when plaintext buffer is too small.
*
* CCM: plaintext is MAC len smaller than cipher text
* GCM: plaintext is TAG len smaller than cipher text
* GMAC: plaintext length must be zero
*/
switch (aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) {
case CCM_MODE:
length_needed = aes_ctx->ac_processed_data_len;
break;
case GCM_MODE:
length_needed = ciphertext->cd_length - aes_ctx->ac_tag_len;
break;
case GMAC_MODE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
length_needed = 0;
break;
default:
length_needed = ciphertext->cd_length;
}
if (plaintext->cd_length < length_needed) {
plaintext->cd_length = length_needed;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_offset = plaintext->cd_offset;
saved_length = plaintext->cd_length;
/*
* Do an update on the specified input data.
*/
ret = aes_decrypt_update(ctx, ciphertext, plaintext, req);
if (ret != CRYPTO_SUCCESS) {
goto cleanup;
}
if (aes_ctx->ac_flags & CCM_MODE) {
ASSERT(aes_ctx->ac_processed_data_len == aes_ctx->ac_data_len);
ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
/* order of following 2 lines MUST not be reversed */
plaintext->cd_offset = plaintext->cd_length;
plaintext->cd_length = saved_length - plaintext->cd_length;
ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, plaintext,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
if (plaintext != ciphertext) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
}
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
/* order of following 2 lines MUST not be reversed */
plaintext->cd_offset = plaintext->cd_length;
plaintext->cd_length = saved_length - plaintext->cd_length;
ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, plaintext,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
if (plaintext != ciphertext) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
}
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
}
ASSERT(aes_ctx->ac_remainder_len == 0);
cleanup:
(void) aes_free_context(ctx);
return (ret);
}
/* ARGSUSED */
static int
aes_encrypt_update(crypto_ctx_t *ctx, crypto_data_t *plaintext,
crypto_data_t *ciphertext, crypto_req_handle_t req)
{
off_t saved_offset;
size_t saved_length, out_len;
int ret = CRYPTO_SUCCESS;
aes_ctx_t *aes_ctx;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
ASSERT(ciphertext != NULL);
/* compute number of bytes that will hold the ciphertext */
out_len = aes_ctx->ac_remainder_len;
out_len += plaintext->cd_length;
out_len &= ~(AES_BLOCK_LEN - 1);
/* return length needed to store the output */
if (ciphertext->cd_length < out_len) {
ciphertext->cd_length = out_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_offset = ciphertext->cd_offset;
saved_length = ciphertext->cd_length;
/*
* Do the AES update on the specified input data.
*/
switch (plaintext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(ctx->cc_provider_private,
plaintext, ciphertext, aes_encrypt_contiguous_blocks,
aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(ctx->cc_provider_private,
plaintext, ciphertext, aes_encrypt_contiguous_blocks,
aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
/*
* Since AES counter mode is a stream cipher, we call
* ctr_mode_final() to pick up any remaining bytes.
* It is an internal function that does not destroy
* the context like *normal* final routines.
*/
if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx,
ciphertext, aes_encrypt_block);
}
if (ret == CRYPTO_SUCCESS) {
if (plaintext != ciphertext)
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
} else {
ciphertext->cd_length = saved_length;
}
ciphertext->cd_offset = saved_offset;
return (ret);
}
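/*
 * A worked example of the output sizing above: leftover bytes from the
 * previous update plus the new plaintext are rounded down to a whole
 * number of 16-byte AES blocks. The byte counts are hypothetical.
 */
#include <stdio.h>

#define	AES_BLOCK_LEN	16

int
main(void)
{
	size_t remainder = 5;	/* bytes carried over from the last update */
	size_t plaintext = 40;	/* bytes supplied in this update */
	size_t out_len = remainder + plaintext;

	out_len &= ~((size_t)AES_BLOCK_LEN - 1);	/* round down to block multiple */
	printf("out_len = %zu\n", out_len);		/* 45 -> 32 */
	return (0);
}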
static int
aes_decrypt_update(crypto_ctx_t *ctx, crypto_data_t *ciphertext,
crypto_data_t *plaintext, crypto_req_handle_t req)
{
off_t saved_offset;
size_t saved_length, out_len;
int ret = CRYPTO_SUCCESS;
aes_ctx_t *aes_ctx;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
ASSERT(plaintext != NULL);
/*
* Compute number of bytes that will hold the plaintext.
* This is not necessary for CCM, GCM, and GMAC since these
* mechanisms never return plaintext for update operations.
*/
if ((aes_ctx->ac_flags & (CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
out_len = aes_ctx->ac_remainder_len;
out_len += ciphertext->cd_length;
out_len &= ~(AES_BLOCK_LEN - 1);
/* return length needed to store the output */
if (plaintext->cd_length < out_len) {
plaintext->cd_length = out_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
}
saved_offset = plaintext->cd_offset;
saved_length = plaintext->cd_length;
if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE))
gcm_set_kmflag((gcm_ctx_t *)aes_ctx, crypto_kmflag(req));
/*
* Do the AES update on the specified input data.
*/
switch (ciphertext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(ctx->cc_provider_private,
ciphertext, plaintext, aes_decrypt_contiguous_blocks,
aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(ctx->cc_provider_private,
ciphertext, plaintext, aes_decrypt_contiguous_blocks,
aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
/*
* Since AES counter mode is a stream cipher, we call
* ctr_mode_final() to pick up any remaining bytes.
* It is an internal function that does not destroy
* the context like *normal* final routines.
*/
if ((aes_ctx->ac_flags & CTR_MODE) && (aes_ctx->ac_remainder_len > 0)) {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, plaintext,
aes_encrypt_block);
if (ret == CRYPTO_DATA_LEN_RANGE)
ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
}
if (ret == CRYPTO_SUCCESS) {
if (ciphertext != plaintext)
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
return (ret);
}
/* ARGSUSED */
static int
aes_encrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
crypto_req_handle_t req)
{
aes_ctx_t *aes_ctx;
int ret;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
if (data->cd_format != CRYPTO_DATA_RAW &&
data->cd_format != CRYPTO_DATA_UIO) {
return (CRYPTO_ARGUMENTS_BAD);
}
if (aes_ctx->ac_flags & CTR_MODE) {
if (aes_ctx->ac_remainder_len > 0) {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
aes_encrypt_block);
if (ret != CRYPTO_SUCCESS)
return (ret);
}
} else if (aes_ctx->ac_flags & CCM_MODE) {
ret = ccm_encrypt_final((ccm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
size_t saved_offset = data->cd_offset;
ret = gcm_encrypt_final((gcm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
data->cd_length = data->cd_offset - saved_offset;
data->cd_offset = saved_offset;
} else {
/*
* There must be no unprocessed plaintext.
* This happens if the length of the last data is
* not a multiple of the AES block length.
*/
if (aes_ctx->ac_remainder_len > 0) {
return (CRYPTO_DATA_LEN_RANGE);
}
data->cd_length = 0;
}
(void) aes_free_context(ctx);
return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
static int
aes_decrypt_final(crypto_ctx_t *ctx, crypto_data_t *data,
crypto_req_handle_t req)
{
aes_ctx_t *aes_ctx;
int ret;
off_t saved_offset;
size_t saved_length;
ASSERT(ctx->cc_provider_private != NULL);
aes_ctx = ctx->cc_provider_private;
if (data->cd_format != CRYPTO_DATA_RAW &&
data->cd_format != CRYPTO_DATA_UIO) {
return (CRYPTO_ARGUMENTS_BAD);
}
/*
* There must be no unprocessed ciphertext.
* This happens if the length of the last ciphertext is
* not a multiple of the AES block length.
*/
if (aes_ctx->ac_remainder_len > 0) {
if ((aes_ctx->ac_flags & CTR_MODE) == 0)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
else {
ret = ctr_mode_final((ctr_ctx_t *)aes_ctx, data,
aes_encrypt_block);
if (ret == CRYPTO_DATA_LEN_RANGE)
ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
if (ret != CRYPTO_SUCCESS)
return (ret);
}
}
if (aes_ctx->ac_flags & CCM_MODE) {
/*
* This is where all the plaintext is returned; make sure
* the plaintext buffer is big enough.
*/
size_t pt_len = aes_ctx->ac_data_len;
if (data->cd_length < pt_len) {
data->cd_length = pt_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
ASSERT(aes_ctx->ac_processed_data_len == pt_len);
ASSERT(aes_ctx->ac_processed_mac_len == aes_ctx->ac_mac_len);
saved_offset = data->cd_offset;
saved_length = data->cd_length;
ret = ccm_decrypt_final((ccm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
data->cd_length = data->cd_offset - saved_offset;
} else {
data->cd_length = saved_length;
}
data->cd_offset = saved_offset;
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
} else if (aes_ctx->ac_flags & (GCM_MODE|GMAC_MODE)) {
/*
* This is where all the plaintext is returned; make sure
* the plaintext buffer is big enough.
*/
gcm_ctx_t *ctx = (gcm_ctx_t *)aes_ctx;
size_t pt_len = ctx->gcm_processed_data_len - ctx->gcm_tag_len;
if (data->cd_length < pt_len) {
data->cd_length = pt_len;
return (CRYPTO_BUFFER_TOO_SMALL);
}
saved_offset = data->cd_offset;
saved_length = data->cd_length;
ret = gcm_decrypt_final((gcm_ctx_t *)aes_ctx, data,
AES_BLOCK_LEN, aes_encrypt_block, aes_xor_block);
if (ret == CRYPTO_SUCCESS) {
data->cd_length = data->cd_offset - saved_offset;
} else {
data->cd_length = saved_length;
}
data->cd_offset = saved_offset;
if (ret != CRYPTO_SUCCESS) {
return (ret);
}
}
if ((aes_ctx->ac_flags & (CTR_MODE|CCM_MODE|GCM_MODE|GMAC_MODE)) == 0) {
data->cd_length = 0;
}
(void) aes_free_context(ctx);
return (CRYPTO_SUCCESS);
}
/* ARGSUSED */
static int
aes_encrypt_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *plaintext, crypto_data_t *ciphertext,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
aes_ctx_t aes_ctx; /* on the stack */
off_t saved_offset;
size_t saved_length;
size_t length_needed;
int ret;
ASSERT(ciphertext != NULL);
/*
* CTR, CCM, GCM, and GMAC modes do not require that plaintext
* be a multiple of AES block size.
*/
switch (mechanism->cm_type) {
case AES_CTR_MECH_INFO_TYPE:
case AES_CCM_MECH_INFO_TYPE:
case AES_GCM_MECH_INFO_TYPE:
case AES_GMAC_MECH_INFO_TYPE:
break;
default:
if ((plaintext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_DATA_LEN_RANGE);
}
if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
return (ret);
bzero(&aes_ctx, sizeof (aes_ctx_t));
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
crypto_kmflag(req), B_TRUE);
if (ret != CRYPTO_SUCCESS)
return (ret);
switch (mechanism->cm_type) {
case AES_CCM_MECH_INFO_TYPE:
length_needed = plaintext->cd_length + aes_ctx.ac_mac_len;
break;
case AES_GMAC_MECH_INFO_TYPE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
- /* FALLTHROUGH */
+ fallthrough;
case AES_GCM_MECH_INFO_TYPE:
length_needed = plaintext->cd_length + aes_ctx.ac_tag_len;
break;
default:
length_needed = plaintext->cd_length;
}
/* return size of buffer needed to store output */
if (ciphertext->cd_length < length_needed) {
ciphertext->cd_length = length_needed;
ret = CRYPTO_BUFFER_TOO_SMALL;
goto out;
}
saved_offset = ciphertext->cd_offset;
saved_length = ciphertext->cd_length;
/*
* Do an update on the specified input data.
*/
switch (plaintext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(&aes_ctx, plaintext, ciphertext,
aes_encrypt_contiguous_blocks, aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(&aes_ctx, plaintext, ciphertext,
aes_encrypt_contiguous_blocks, aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
if (ret == CRYPTO_SUCCESS) {
if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
ret = ccm_encrypt_final((ccm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
ASSERT(aes_ctx.ac_remainder_len == 0);
} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
ret = gcm_encrypt_final((gcm_ctx_t *)&aes_ctx,
ciphertext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
if (ret != CRYPTO_SUCCESS)
goto out;
ASSERT(aes_ctx.ac_remainder_len == 0);
} else if (mechanism->cm_type == AES_CTR_MECH_INFO_TYPE) {
if (aes_ctx.ac_remainder_len > 0) {
ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
ciphertext, aes_encrypt_block);
if (ret != CRYPTO_SUCCESS)
goto out;
}
} else {
ASSERT(aes_ctx.ac_remainder_len == 0);
}
if (plaintext != ciphertext) {
ciphertext->cd_length =
ciphertext->cd_offset - saved_offset;
}
} else {
ciphertext->cd_length = saved_length;
}
ciphertext->cd_offset = saved_offset;
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
#ifdef CAN_USE_GCM_ASM
if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE) &&
((gcm_ctx_t *)&aes_ctx)->gcm_Htable != NULL) {
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
return (ret);
}
/* ARGSUSED */
static int
aes_decrypt_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *ciphertext, crypto_data_t *plaintext,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
aes_ctx_t aes_ctx; /* on the stack */
off_t saved_offset;
size_t saved_length;
size_t length_needed;
int ret;
ASSERT(plaintext != NULL);
/*
* CCM, GCM, CTR, and GMAC modes do not require that ciphertext
* be a multiple of AES block size.
*/
switch (mechanism->cm_type) {
case AES_CTR_MECH_INFO_TYPE:
case AES_CCM_MECH_INFO_TYPE:
case AES_GCM_MECH_INFO_TYPE:
case AES_GMAC_MECH_INFO_TYPE:
break;
default:
if ((ciphertext->cd_length & (AES_BLOCK_LEN - 1)) != 0)
return (CRYPTO_ENCRYPTED_DATA_LEN_RANGE);
}
if ((ret = aes_check_mech_param(mechanism, NULL, 0)) != CRYPTO_SUCCESS)
return (ret);
bzero(&aes_ctx, sizeof (aes_ctx_t));
ret = aes_common_init_ctx(&aes_ctx, template, mechanism, key,
crypto_kmflag(req), B_FALSE);
if (ret != CRYPTO_SUCCESS)
return (ret);
switch (mechanism->cm_type) {
case AES_CCM_MECH_INFO_TYPE:
length_needed = aes_ctx.ac_data_len;
break;
case AES_GCM_MECH_INFO_TYPE:
length_needed = ciphertext->cd_length - aes_ctx.ac_tag_len;
break;
case AES_GMAC_MECH_INFO_TYPE:
if (plaintext->cd_length != 0)
return (CRYPTO_ARGUMENTS_BAD);
length_needed = 0;
break;
default:
length_needed = ciphertext->cd_length;
}
/* return size of buffer needed to store output */
if (plaintext->cd_length < length_needed) {
plaintext->cd_length = length_needed;
ret = CRYPTO_BUFFER_TOO_SMALL;
goto out;
}
saved_offset = plaintext->cd_offset;
saved_length = plaintext->cd_length;
if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE)
gcm_set_kmflag((gcm_ctx_t *)&aes_ctx, crypto_kmflag(req));
/*
* Do an update on the specified input data.
*/
switch (ciphertext->cd_format) {
case CRYPTO_DATA_RAW:
ret = crypto_update_iov(&aes_ctx, ciphertext, plaintext,
aes_decrypt_contiguous_blocks, aes_copy_block64);
break;
case CRYPTO_DATA_UIO:
ret = crypto_update_uio(&aes_ctx, ciphertext, plaintext,
aes_decrypt_contiguous_blocks, aes_copy_block64);
break;
default:
ret = CRYPTO_ARGUMENTS_BAD;
}
if (ret == CRYPTO_SUCCESS) {
if (mechanism->cm_type == AES_CCM_MECH_INFO_TYPE) {
ASSERT(aes_ctx.ac_processed_data_len
== aes_ctx.ac_data_len);
ASSERT(aes_ctx.ac_processed_mac_len
== aes_ctx.ac_mac_len);
ret = ccm_decrypt_final((ccm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_copy_block, aes_xor_block);
ASSERT(aes_ctx.ac_remainder_len == 0);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
plaintext->cd_length = saved_length;
}
} else if (mechanism->cm_type == AES_GCM_MECH_INFO_TYPE ||
mechanism->cm_type == AES_GMAC_MECH_INFO_TYPE) {
ret = gcm_decrypt_final((gcm_ctx_t *)&aes_ctx,
plaintext, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
ASSERT(aes_ctx.ac_remainder_len == 0);
if ((ret == CRYPTO_SUCCESS) &&
(ciphertext != plaintext)) {
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
plaintext->cd_length = saved_length;
}
} else if (mechanism->cm_type != AES_CTR_MECH_INFO_TYPE) {
ASSERT(aes_ctx.ac_remainder_len == 0);
if (ciphertext != plaintext)
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
} else {
if (aes_ctx.ac_remainder_len > 0) {
ret = ctr_mode_final((ctr_ctx_t *)&aes_ctx,
plaintext, aes_encrypt_block);
if (ret == CRYPTO_DATA_LEN_RANGE)
ret = CRYPTO_ENCRYPTED_DATA_LEN_RANGE;
if (ret != CRYPTO_SUCCESS)
goto out;
}
if (ciphertext != plaintext)
plaintext->cd_length =
plaintext->cd_offset - saved_offset;
}
} else {
plaintext->cd_length = saved_length;
}
plaintext->cd_offset = saved_offset;
out:
if (aes_ctx.ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
kmem_free(aes_ctx.ac_keysched, aes_ctx.ac_keysched_len);
}
if (aes_ctx.ac_flags & CCM_MODE) {
if (aes_ctx.ac_pt_buf != NULL) {
vmem_free(aes_ctx.ac_pt_buf, aes_ctx.ac_data_len);
}
} else if (aes_ctx.ac_flags & (GCM_MODE|GMAC_MODE)) {
if (((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf != NULL) {
vmem_free(((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf,
((gcm_ctx_t *)&aes_ctx)->gcm_pt_buf_len);
}
#ifdef CAN_USE_GCM_ASM
if (((gcm_ctx_t *)&aes_ctx)->gcm_Htable != NULL) {
gcm_ctx_t *ctx = (gcm_ctx_t *)&aes_ctx;
bzero(ctx->gcm_Htable, ctx->gcm_htab_len);
kmem_free(ctx->gcm_Htable, ctx->gcm_htab_len);
}
#endif
}
return (ret);
}
/*
* KCF software provider context template entry points.
*/
/* ARGSUSED */
static int
aes_create_ctx_template(crypto_provider_handle_t provider,
crypto_mechanism_t *mechanism, crypto_key_t *key,
crypto_spi_ctx_template_t *tmpl, size_t *tmpl_size, crypto_req_handle_t req)
{
void *keysched;
size_t size;
int rv;
if (mechanism->cm_type != AES_ECB_MECH_INFO_TYPE &&
mechanism->cm_type != AES_CBC_MECH_INFO_TYPE &&
mechanism->cm_type != AES_CTR_MECH_INFO_TYPE &&
mechanism->cm_type != AES_CCM_MECH_INFO_TYPE &&
mechanism->cm_type != AES_GCM_MECH_INFO_TYPE &&
mechanism->cm_type != AES_GMAC_MECH_INFO_TYPE)
return (CRYPTO_MECHANISM_INVALID);
if ((keysched = aes_alloc_keysched(&size,
crypto_kmflag(req))) == NULL) {
return (CRYPTO_HOST_MEMORY);
}
/*
* Initialize key schedule. Key length information is stored
* in the key.
*/
if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
bzero(keysched, size);
kmem_free(keysched, size);
return (rv);
}
*tmpl = keysched;
*tmpl_size = size;
return (CRYPTO_SUCCESS);
}
static int
aes_free_context(crypto_ctx_t *ctx)
{
aes_ctx_t *aes_ctx = ctx->cc_provider_private;
if (aes_ctx != NULL) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
ASSERT(aes_ctx->ac_keysched_len != 0);
bzero(aes_ctx->ac_keysched, aes_ctx->ac_keysched_len);
kmem_free(aes_ctx->ac_keysched,
aes_ctx->ac_keysched_len);
}
crypto_free_mode_ctx(aes_ctx);
ctx->cc_provider_private = NULL;
}
return (CRYPTO_SUCCESS);
}
static int
aes_common_init_ctx(aes_ctx_t *aes_ctx, crypto_spi_ctx_template_t *template,
crypto_mechanism_t *mechanism, crypto_key_t *key, int kmflag,
boolean_t is_encrypt_init)
{
int rv = CRYPTO_SUCCESS;
void *keysched;
size_t size = 0;
if (template == NULL) {
if ((keysched = aes_alloc_keysched(&size, kmflag)) == NULL)
return (CRYPTO_HOST_MEMORY);
/*
* Initialize key schedule.
* Key length is stored in the key.
*/
if ((rv = init_keysched(key, keysched)) != CRYPTO_SUCCESS) {
kmem_free(keysched, size);
return (rv);
}
aes_ctx->ac_flags |= PROVIDER_OWNS_KEY_SCHEDULE;
aes_ctx->ac_keysched_len = size;
} else {
keysched = template;
}
aes_ctx->ac_keysched = keysched;
switch (mechanism->cm_type) {
case AES_CBC_MECH_INFO_TYPE:
rv = cbc_init_ctx((cbc_ctx_t *)aes_ctx, mechanism->cm_param,
mechanism->cm_param_len, AES_BLOCK_LEN, aes_copy_block64);
break;
case AES_CTR_MECH_INFO_TYPE: {
CK_AES_CTR_PARAMS *pp;
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_CTR_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
pp = (CK_AES_CTR_PARAMS *)(void *)mechanism->cm_param;
rv = ctr_init_ctx((ctr_ctx_t *)aes_ctx, pp->ulCounterBits,
pp->cb, aes_copy_block);
break;
}
case AES_CCM_MECH_INFO_TYPE:
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_CCM_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
rv = ccm_init_ctx((ccm_ctx_t *)aes_ctx, mechanism->cm_param,
kmflag, is_encrypt_init, AES_BLOCK_LEN, aes_encrypt_block,
aes_xor_block);
break;
case AES_GCM_MECH_INFO_TYPE:
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_GCM_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
rv = gcm_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
break;
case AES_GMAC_MECH_INFO_TYPE:
if (mechanism->cm_param == NULL ||
mechanism->cm_param_len != sizeof (CK_AES_GMAC_PARAMS)) {
return (CRYPTO_MECHANISM_PARAM_INVALID);
}
rv = gmac_init_ctx((gcm_ctx_t *)aes_ctx, mechanism->cm_param,
AES_BLOCK_LEN, aes_encrypt_block, aes_copy_block,
aes_xor_block);
break;
case AES_ECB_MECH_INFO_TYPE:
aes_ctx->ac_flags |= ECB_MODE;
}
if (rv != CRYPTO_SUCCESS) {
if (aes_ctx->ac_flags & PROVIDER_OWNS_KEY_SCHEDULE) {
bzero(keysched, size);
kmem_free(keysched, size);
}
}
return (rv);
}
static int
process_gmac_mech(crypto_mechanism_t *mech, crypto_data_t *data,
CK_AES_GCM_PARAMS *gcm_params)
{
/* LINTED: pointer alignment */
CK_AES_GMAC_PARAMS *params = (CK_AES_GMAC_PARAMS *)mech->cm_param;
if (mech->cm_type != AES_GMAC_MECH_INFO_TYPE)
return (CRYPTO_MECHANISM_INVALID);
if (mech->cm_param_len != sizeof (CK_AES_GMAC_PARAMS))
return (CRYPTO_MECHANISM_PARAM_INVALID);
if (params->pIv == NULL)
return (CRYPTO_MECHANISM_PARAM_INVALID);
gcm_params->pIv = params->pIv;
gcm_params->ulIvLen = AES_GMAC_IV_LEN;
gcm_params->ulTagBits = AES_GMAC_TAG_BITS;
if (data == NULL)
return (CRYPTO_SUCCESS);
if (data->cd_format != CRYPTO_DATA_RAW)
return (CRYPTO_ARGUMENTS_BAD);
gcm_params->pAAD = (uchar_t *)data->cd_raw.iov_base;
gcm_params->ulAADLen = data->cd_length;
return (CRYPTO_SUCCESS);
}
static int
aes_mac_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
CK_AES_GCM_PARAMS gcm_params;
crypto_mechanism_t gcm_mech;
int rv;
if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
!= CRYPTO_SUCCESS)
return (rv);
gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
gcm_mech.cm_param = (char *)&gcm_params;
return (aes_encrypt_atomic(provider, session_id, &gcm_mech,
key, &null_crypto_data, mac, template, req));
}
static int
aes_mac_verify_atomic(crypto_provider_handle_t provider,
crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
crypto_spi_ctx_template_t template, crypto_req_handle_t req)
{
CK_AES_GCM_PARAMS gcm_params;
crypto_mechanism_t gcm_mech;
int rv;
if ((rv = process_gmac_mech(mechanism, data, &gcm_params))
!= CRYPTO_SUCCESS)
return (rv);
gcm_mech.cm_type = AES_GCM_MECH_INFO_TYPE;
gcm_mech.cm_param_len = sizeof (CK_AES_GCM_PARAMS);
gcm_mech.cm_param = (char *)&gcm_params;
return (aes_decrypt_atomic(provider, session_id, &gcm_mech,
key, mac, &null_crypto_data, template, req));
}
diff --git a/sys/contrib/openzfs/module/lua/lcode.c b/sys/contrib/openzfs/module/lua/lcode.c
index ae9a3d91d810..4d88c792a281 100644
--- a/sys/contrib/openzfs/module/lua/lcode.c
+++ b/sys/contrib/openzfs/module/lua/lcode.c
@@ -1,884 +1,888 @@
/* BEGIN CSTYLED */
/*
** $Id: lcode.c,v 2.62.1.1 2013/04/12 18:48:47 roberto Exp $
** Code generator for Lua
** See Copyright Notice in lua.h
*/
#define lcode_c
#define LUA_CORE
+#if defined(HAVE_IMPLICIT_FALLTHROUGH)
+#pragma GCC diagnostic ignored "-Wimplicit-fallthrough"
+#endif
+
#include <sys/lua/lua.h>
#include "lcode.h"
#include "ldebug.h"
#include "ldo.h"
#include "lgc.h"
#include "llex.h"
#include "lmem.h"
#include "lobject.h"
#include "lopcodes.h"
#include "lparser.h"
#include "lstring.h"
#include "ltable.h"
#include "lvm.h"
#define hasjumps(e) ((e)->t != (e)->f)
static int isnumeral(expdesc *e) {
return (e->k == VKNUM && e->t == NO_JUMP && e->f == NO_JUMP);
}
void luaK_nil (FuncState *fs, int from, int n) {
Instruction *previous;
int l = from + n - 1; /* last register to set nil */
if (fs->pc > fs->lasttarget) { /* no jumps to current position? */
previous = &fs->f->code[fs->pc-1];
if (GET_OPCODE(*previous) == OP_LOADNIL) {
int pfrom = GETARG_A(*previous);
int pl = pfrom + GETARG_B(*previous);
if ((pfrom <= from && from <= pl + 1) ||
(from <= pfrom && pfrom <= l + 1)) { /* can connect both? */
if (pfrom < from) from = pfrom; /* from = min(from, pfrom) */
if (pl > l) l = pl; /* l = max(l, pl) */
SETARG_A(*previous, from);
SETARG_B(*previous, l - from);
return;
}
} /* else go through */
}
luaK_codeABC(fs, OP_LOADNIL, from, n - 1, 0); /* else no optimization */
}
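/*
 * A standalone worked example of the merge test above: the previous
 * OP_LOADNIL covers registers pfrom..pl and the new request covers
 * from..l; the two can be folded into one instruction when the ranges
 * touch or overlap. The register numbers here are hypothetical.
 */
#include <stdio.h>

int
main(void)
{
	int pfrom = 2, pl = 4;	/* previous instruction nils registers 2..4 */
	int from = 5, l = 6;	/* new request nils registers 5..6 */

	if ((pfrom <= from && from <= pl + 1) ||
	    (from <= pfrom && pfrom <= l + 1)) {	/* can connect both? */
		if (pfrom < from) from = pfrom;		/* from = min(from, pfrom) */
		if (pl > l) l = pl;			/* l = max(l, pl) */
		printf("merged: nil registers %d..%d\n", from, l);	/* 2..6 */
	} else {
		printf("ranges do not touch; keep two instructions\n");
	}
	return (0);
}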
int luaK_jump (FuncState *fs) {
int jpc = fs->jpc; /* save list of jumps to here */
int j;
fs->jpc = NO_JUMP;
j = luaK_codeAsBx(fs, OP_JMP, 0, NO_JUMP);
luaK_concat(fs, &j, jpc); /* keep them on hold */
return j;
}
void luaK_ret (FuncState *fs, int first, int nret) {
luaK_codeABC(fs, OP_RETURN, first, nret+1, 0);
}
static int condjump (FuncState *fs, OpCode op, int A, int B, int C) {
luaK_codeABC(fs, op, A, B, C);
return luaK_jump(fs);
}
static void fixjump (FuncState *fs, int pc, int dest) {
Instruction *jmp = &fs->f->code[pc];
int offset = dest-(pc+1);
lua_assert(dest != NO_JUMP);
if (abs(offset) > MAXARG_sBx)
luaX_syntaxerror(fs->ls, "control structure too long");
SETARG_sBx(*jmp, offset);
}
/*
** returns current `pc' and marks it as a jump target (to avoid wrong
** optimizations with consecutive instructions not in the same basic block).
*/
int luaK_getlabel (FuncState *fs) {
fs->lasttarget = fs->pc;
return fs->pc;
}
static int getjump (FuncState *fs, int pc) {
int offset = GETARG_sBx(fs->f->code[pc]);
if (offset == NO_JUMP) /* point to itself represents end of list */
return NO_JUMP; /* end of list */
else
return (pc+1)+offset; /* turn offset into absolute position */
}
static Instruction *getjumpcontrol (FuncState *fs, int pc) {
Instruction *pi = &fs->f->code[pc];
if (pc >= 1 && testTMode(GET_OPCODE(*(pi-1))))
return pi-1;
else
return pi;
}
/*
** check whether list has any jump that does not produce a value
** (or produce an inverted value)
*/
static int need_value (FuncState *fs, int list) {
for (; list != NO_JUMP; list = getjump(fs, list)) {
Instruction i = *getjumpcontrol(fs, list);
if (GET_OPCODE(i) != OP_TESTSET) return 1;
}
return 0; /* not found */
}
static int patchtestreg (FuncState *fs, int node, int reg) {
Instruction *i = getjumpcontrol(fs, node);
if (GET_OPCODE(*i) != OP_TESTSET)
return 0; /* cannot patch other instructions */
if (reg != NO_REG && reg != GETARG_B(*i))
SETARG_A(*i, reg);
else /* no register to put value or register already has the value */
*i = CREATE_ABC(OP_TEST, GETARG_B(*i), 0, GETARG_C(*i));
return 1;
}
static void removevalues (FuncState *fs, int list) {
for (; list != NO_JUMP; list = getjump(fs, list))
patchtestreg(fs, list, NO_REG);
}
static void patchlistaux (FuncState *fs, int list, int vtarget, int reg,
int dtarget) {
while (list != NO_JUMP) {
int next = getjump(fs, list);
if (patchtestreg(fs, list, reg))
fixjump(fs, list, vtarget);
else
fixjump(fs, list, dtarget); /* jump to default target */
list = next;
}
}
static void dischargejpc (FuncState *fs) {
patchlistaux(fs, fs->jpc, fs->pc, NO_REG, fs->pc);
fs->jpc = NO_JUMP;
}
void luaK_patchlist (FuncState *fs, int list, int target) {
if (target == fs->pc)
luaK_patchtohere(fs, list);
else {
lua_assert(target < fs->pc);
patchlistaux(fs, list, target, NO_REG, target);
}
}
LUAI_FUNC void luaK_patchclose (FuncState *fs, int list, int level) {
level++; /* argument is +1 to reserve 0 as non-op */
while (list != NO_JUMP) {
int next = getjump(fs, list);
lua_assert(GET_OPCODE(fs->f->code[list]) == OP_JMP &&
(GETARG_A(fs->f->code[list]) == 0 ||
GETARG_A(fs->f->code[list]) >= level));
SETARG_A(fs->f->code[list], level);
list = next;
}
}
void luaK_patchtohere (FuncState *fs, int list) {
luaK_getlabel(fs);
luaK_concat(fs, &fs->jpc, list);
}
void luaK_concat (FuncState *fs, int *l1, int l2) {
if (l2 == NO_JUMP) return;
else if (*l1 == NO_JUMP)
*l1 = l2;
else {
int list = *l1;
int next;
while ((next = getjump(fs, list)) != NO_JUMP) /* find last element */
list = next;
fixjump(fs, list, l2);
}
}
static int luaK_code (FuncState *fs, Instruction i) {
Proto *f = fs->f;
dischargejpc(fs); /* `pc' will change */
/* put new instruction in code array */
luaM_growvector(fs->ls->L, f->code, fs->pc, f->sizecode, Instruction,
MAX_INT, "opcodes");
f->code[fs->pc] = i;
/* save corresponding line information */
luaM_growvector(fs->ls->L, f->lineinfo, fs->pc, f->sizelineinfo, int,
MAX_INT, "opcodes");
f->lineinfo[fs->pc] = fs->ls->lastline;
return fs->pc++;
}
int luaK_codeABC (FuncState *fs, OpCode o, int a, int b, int c) {
lua_assert(getOpMode(o) == iABC);
lua_assert(getBMode(o) != OpArgN || b == 0);
lua_assert(getCMode(o) != OpArgN || c == 0);
lua_assert(a <= MAXARG_A && b <= MAXARG_B && c <= MAXARG_C);
return luaK_code(fs, CREATE_ABC(o, a, b, c));
}
int luaK_codeABx (FuncState *fs, OpCode o, int a, unsigned int bc) {
lua_assert(getOpMode(o) == iABx || getOpMode(o) == iAsBx);
lua_assert(getCMode(o) == OpArgN);
lua_assert(a <= MAXARG_A && bc <= MAXARG_Bx);
return luaK_code(fs, CREATE_ABx(o, a, bc));
}
static int codeextraarg (FuncState *fs, int a) {
lua_assert(a <= MAXARG_Ax);
return luaK_code(fs, CREATE_Ax(OP_EXTRAARG, a));
}
int luaK_codek (FuncState *fs, int reg, int k) {
if (k <= MAXARG_Bx)
return luaK_codeABx(fs, OP_LOADK, reg, k);
else {
int p = luaK_codeABx(fs, OP_LOADKX, reg, 0);
codeextraarg(fs, k);
return p;
}
}
void luaK_checkstack (FuncState *fs, int n) {
int newstack = fs->freereg + n;
if (newstack > fs->f->maxstacksize) {
if (newstack >= MAXSTACK)
luaX_syntaxerror(fs->ls, "function or expression too complex");
fs->f->maxstacksize = cast_byte(newstack);
}
}
void luaK_reserveregs (FuncState *fs, int n) {
luaK_checkstack(fs, n);
fs->freereg += n;
}
static void freereg (FuncState *fs, int reg) {
if (!ISK(reg) && reg >= fs->nactvar) {
fs->freereg--;
lua_assert(reg == fs->freereg);
}
}
static void freeexp (FuncState *fs, expdesc *e) {
if (e->k == VNONRELOC)
freereg(fs, e->u.info);
}
static int addk (FuncState *fs, TValue *key, TValue *v) {
lua_State *L = fs->ls->L;
TValue *idx = luaH_set(L, fs->h, key);
Proto *f = fs->f;
int k, oldsize;
if (ttisnumber(idx)) {
lua_Number n = nvalue(idx);
lua_number2int(k, n);
if (luaV_rawequalobj(&f->k[k], v))
return k;
/* else may be a collision (e.g., between 0.0 and "\0\0\0\0\0\0\0\0");
go through and create a new entry for this value */
}
/* constant not found; create a new entry */
oldsize = f->sizek;
k = fs->nk;
/* numerical value does not need GC barrier;
table has no metatable, so it does not need to invalidate cache */
setnvalue(idx, cast_num(k));
luaM_growvector(L, f->k, k, f->sizek, TValue, MAXARG_Ax, "constants");
while (oldsize < f->sizek) setnilvalue(&f->k[oldsize++]);
setobj(L, &f->k[k], v);
fs->nk++;
luaC_barrier(L, f, v);
return k;
}
int luaK_stringK (FuncState *fs, TString *s) {
TValue o;
setsvalue(fs->ls->L, &o, s);
return addk(fs, &o, &o);
}
int luaK_numberK (FuncState *fs, lua_Number r) {
int n;
lua_State *L = fs->ls->L;
TValue o;
setnvalue(&o, r);
if (r == 0 || luai_numisnan(NULL, r)) { /* handle -0 and NaN */
/* use raw representation as key to avoid numeric problems */
setsvalue(L, L->top++, luaS_newlstr(L, (char *)&r, sizeof(r)));
n = addk(fs, L->top - 1, &o);
L->top--;
}
else
n = addk(fs, &o, &o); /* regular case */
return n;
}
static int boolK (FuncState *fs, int b) {
TValue o;
setbvalue(&o, b);
return addk(fs, &o, &o);
}
static int nilK (FuncState *fs) {
TValue k, v;
setnilvalue(&v);
/* cannot use nil as key; instead use table itself to represent nil */
sethvalue(fs->ls->L, &k, fs->h);
return addk(fs, &k, &v);
}
void luaK_setreturns (FuncState *fs, expdesc *e, int nresults) {
if (e->k == VCALL) { /* expression is an open function call? */
SETARG_C(getcode(fs, e), nresults+1);
}
else if (e->k == VVARARG) {
SETARG_B(getcode(fs, e), nresults+1);
SETARG_A(getcode(fs, e), fs->freereg);
luaK_reserveregs(fs, 1);
}
}
void luaK_setoneret (FuncState *fs, expdesc *e) {
if (e->k == VCALL) { /* expression is an open function call? */
e->k = VNONRELOC;
e->u.info = GETARG_A(getcode(fs, e));
}
else if (e->k == VVARARG) {
SETARG_B(getcode(fs, e), 2);
e->k = VRELOCABLE; /* can relocate its simple result */
}
}
void luaK_dischargevars (FuncState *fs, expdesc *e) {
switch (e->k) {
case VLOCAL: {
e->k = VNONRELOC;
break;
}
case VUPVAL: {
e->u.info = luaK_codeABC(fs, OP_GETUPVAL, 0, e->u.info, 0);
e->k = VRELOCABLE;
break;
}
case VINDEXED: {
OpCode op = OP_GETTABUP; /* assume 't' is in an upvalue */
freereg(fs, e->u.ind.idx);
if (e->u.ind.vt == VLOCAL) { /* 't' is in a register? */
freereg(fs, e->u.ind.t);
op = OP_GETTABLE;
}
e->u.info = luaK_codeABC(fs, op, 0, e->u.ind.t, e->u.ind.idx);
e->k = VRELOCABLE;
break;
}
case VVARARG:
case VCALL: {
luaK_setoneret(fs, e);
break;
}
default: break; /* there is one value available (somewhere) */
}
}
static int code_label (FuncState *fs, int A, int b, int jump) {
luaK_getlabel(fs); /* those instructions may be jump targets */
return luaK_codeABC(fs, OP_LOADBOOL, A, b, jump);
}
static void discharge2reg (FuncState *fs, expdesc *e, int reg) {
luaK_dischargevars(fs, e);
switch (e->k) {
case VNIL: {
luaK_nil(fs, reg, 1);
break;
}
case VFALSE: case VTRUE: {
luaK_codeABC(fs, OP_LOADBOOL, reg, e->k == VTRUE, 0);
break;
}
case VK: {
luaK_codek(fs, reg, e->u.info);
break;
}
case VKNUM: {
luaK_codek(fs, reg, luaK_numberK(fs, e->u.nval));
break;
}
case VRELOCABLE: {
Instruction *pc = &getcode(fs, e);
SETARG_A(*pc, reg);
break;
}
case VNONRELOC: {
if (reg != e->u.info)
luaK_codeABC(fs, OP_MOVE, reg, e->u.info, 0);
break;
}
default: {
lua_assert(e->k == VVOID || e->k == VJMP);
return; /* nothing to do... */
}
}
e->u.info = reg;
e->k = VNONRELOC;
}
static void discharge2anyreg (FuncState *fs, expdesc *e) {
if (e->k != VNONRELOC) {
luaK_reserveregs(fs, 1);
discharge2reg(fs, e, fs->freereg-1);
}
}
static void exp2reg (FuncState *fs, expdesc *e, int reg) {
discharge2reg(fs, e, reg);
if (e->k == VJMP)
luaK_concat(fs, &e->t, e->u.info); /* put this jump in `t' list */
if (hasjumps(e)) {
int final; /* position after whole expression */
int p_f = NO_JUMP; /* position of an eventual LOAD false */
int p_t = NO_JUMP; /* position of an eventual LOAD true */
if (need_value(fs, e->t) || need_value(fs, e->f)) {
int fj = (e->k == VJMP) ? NO_JUMP : luaK_jump(fs);
p_f = code_label(fs, reg, 0, 1);
p_t = code_label(fs, reg, 1, 0);
luaK_patchtohere(fs, fj);
}
final = luaK_getlabel(fs);
patchlistaux(fs, e->f, final, reg, p_f);
patchlistaux(fs, e->t, final, reg, p_t);
}
e->f = e->t = NO_JUMP;
e->u.info = reg;
e->k = VNONRELOC;
}
void luaK_exp2nextreg (FuncState *fs, expdesc *e) {
luaK_dischargevars(fs, e);
freeexp(fs, e);
luaK_reserveregs(fs, 1);
exp2reg(fs, e, fs->freereg - 1);
}
int luaK_exp2anyreg (FuncState *fs, expdesc *e) {
luaK_dischargevars(fs, e);
if (e->k == VNONRELOC) {
if (!hasjumps(e)) return e->u.info; /* exp is already in a register */
if (e->u.info >= fs->nactvar) { /* reg. is not a local? */
exp2reg(fs, e, e->u.info); /* put value on it */
return e->u.info;
}
}
luaK_exp2nextreg(fs, e); /* default */
return e->u.info;
}
void luaK_exp2anyregup (FuncState *fs, expdesc *e) {
if (e->k != VUPVAL || hasjumps(e))
luaK_exp2anyreg(fs, e);
}
void luaK_exp2val (FuncState *fs, expdesc *e) {
if (hasjumps(e))
luaK_exp2anyreg(fs, e);
else
luaK_dischargevars(fs, e);
}
int luaK_exp2RK (FuncState *fs, expdesc *e) {
luaK_exp2val(fs, e);
switch (e->k) {
case VTRUE:
case VFALSE:
case VNIL: {
if (fs->nk <= MAXINDEXRK) { /* constant fits in RK operand? */
e->u.info = (e->k == VNIL) ? nilK(fs) : boolK(fs, (e->k == VTRUE));
e->k = VK;
return RKASK(e->u.info);
}
else break;
}
case VKNUM: {
e->u.info = luaK_numberK(fs, e->u.nval);
e->k = VK;
/* go through */
}
case VK: {
if (e->u.info <= MAXINDEXRK) /* constant fits in argC? */
return RKASK(e->u.info);
else break;
}
default: break;
}
/* not a constant in the right range: put it in a register */
return luaK_exp2anyreg(fs, e);
}
void luaK_storevar (FuncState *fs, expdesc *var, expdesc *ex) {
switch (var->k) {
case VLOCAL: {
freeexp(fs, ex);
exp2reg(fs, ex, var->u.info);
return;
}
case VUPVAL: {
int e = luaK_exp2anyreg(fs, ex);
luaK_codeABC(fs, OP_SETUPVAL, e, var->u.info, 0);
break;
}
case VINDEXED: {
OpCode op = (var->u.ind.vt == VLOCAL) ? OP_SETTABLE : OP_SETTABUP;
int e = luaK_exp2RK(fs, ex);
luaK_codeABC(fs, op, var->u.ind.t, var->u.ind.idx, e);
break;
}
default: {
lua_assert(0); /* invalid var kind to store */
break;
}
}
freeexp(fs, ex);
}
void luaK_self (FuncState *fs, expdesc *e, expdesc *key) {
int ereg;
luaK_exp2anyreg(fs, e);
ereg = e->u.info; /* register where 'e' was placed */
freeexp(fs, e);
e->u.info = fs->freereg; /* base register for op_self */
e->k = VNONRELOC;
luaK_reserveregs(fs, 2); /* function and 'self' produced by op_self */
luaK_codeABC(fs, OP_SELF, e->u.info, ereg, luaK_exp2RK(fs, key));
freeexp(fs, key);
}
static void invertjump (FuncState *fs, expdesc *e) {
Instruction *pc = getjumpcontrol(fs, e->u.info);
lua_assert(testTMode(GET_OPCODE(*pc)) && GET_OPCODE(*pc) != OP_TESTSET &&
GET_OPCODE(*pc) != OP_TEST);
SETARG_A(*pc, !(GETARG_A(*pc)));
}
static int jumponcond (FuncState *fs, expdesc *e, int cond) {
if (e->k == VRELOCABLE) {
Instruction ie = getcode(fs, e);
if (GET_OPCODE(ie) == OP_NOT) {
fs->pc--; /* remove previous OP_NOT */
return condjump(fs, OP_TEST, GETARG_B(ie), 0, !cond);
}
/* else go through */
}
discharge2anyreg(fs, e);
freeexp(fs, e);
return condjump(fs, OP_TESTSET, NO_REG, e->u.info, cond);
}
void luaK_goiftrue (FuncState *fs, expdesc *e) {
int pc; /* pc of last jump */
luaK_dischargevars(fs, e);
switch (e->k) {
case VJMP: {
invertjump(fs, e);
pc = e->u.info;
break;
}
case VK: case VKNUM: case VTRUE: {
pc = NO_JUMP; /* always true; do nothing */
break;
}
default: {
pc = jumponcond(fs, e, 0);
break;
}
}
luaK_concat(fs, &e->f, pc); /* insert last jump in `f' list */
luaK_patchtohere(fs, e->t);
e->t = NO_JUMP;
}
void luaK_goiffalse (FuncState *fs, expdesc *e) {
int pc; /* pc of last jump */
luaK_dischargevars(fs, e);
switch (e->k) {
case VJMP: {
pc = e->u.info;
break;
}
case VNIL: case VFALSE: {
pc = NO_JUMP; /* always false; do nothing */
break;
}
default: {
pc = jumponcond(fs, e, 1);
break;
}
}
luaK_concat(fs, &e->t, pc); /* insert last jump in `t' list */
luaK_patchtohere(fs, e->f);
e->f = NO_JUMP;
}
static void codenot (FuncState *fs, expdesc *e) {
luaK_dischargevars(fs, e);
switch (e->k) {
case VNIL: case VFALSE: {
e->k = VTRUE;
break;
}
case VK: case VKNUM: case VTRUE: {
e->k = VFALSE;
break;
}
case VJMP: {
invertjump(fs, e);
break;
}
case VRELOCABLE:
case VNONRELOC: {
discharge2anyreg(fs, e);
freeexp(fs, e);
e->u.info = luaK_codeABC(fs, OP_NOT, 0, e->u.info, 0);
e->k = VRELOCABLE;
break;
}
default: {
lua_assert(0); /* cannot happen */
break;
}
}
/* interchange true and false lists */
{ int temp = e->f; e->f = e->t; e->t = temp; }
removevalues(fs, e->f);
removevalues(fs, e->t);
}
void luaK_indexed (FuncState *fs, expdesc *t, expdesc *k) {
lua_assert(!hasjumps(t));
t->u.ind.t = t->u.info;
t->u.ind.idx = luaK_exp2RK(fs, k);
t->u.ind.vt = (t->k == VUPVAL) ? VUPVAL
: check_exp(vkisinreg(t->k), VLOCAL);
t->k = VINDEXED;
}
static int constfolding (OpCode op, expdesc *e1, expdesc *e2) {
lua_Number r;
if (!isnumeral(e1) || !isnumeral(e2)) return 0;
if ((op == OP_DIV || op == OP_MOD) && e2->u.nval == 0)
return 0; /* do not attempt to divide by 0 */
/*
* Patched: check for MIN_INT / -1
*/
if (op == OP_DIV && e1->u.nval == INT64_MIN && e2->u.nval == -1)
return 0;
r = luaO_arith(op - OP_ADD + LUA_OPADD, e1->u.nval, e2->u.nval);
e1->u.nval = r;
return 1;
}
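/*
 * Why the patched guard above exists: with lua_Number being a 64-bit
 * integer in this port (as the INT64_MIN check implies), INT64_MIN / -1
 * is not representable and overflows, typically trapping, so the folder
 * must not evaluate it at compile time. A standalone demonstration of
 * the same guard:
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	int64_t a = INT64_MIN, b = -1;

	if (b == 0 || (a == INT64_MIN && b == -1))
		printf("folding skipped: result not representable\n");
	else
		printf("%lld\n", (long long)(a / b));
	return (0);
}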
static void codearith (FuncState *fs, OpCode op,
expdesc *e1, expdesc *e2, int line) {
if (constfolding(op, e1, e2))
return;
else {
int o2 = (op != OP_UNM && op != OP_LEN) ? luaK_exp2RK(fs, e2) : 0;
int o1 = luaK_exp2RK(fs, e1);
if (o1 > o2) {
freeexp(fs, e1);
freeexp(fs, e2);
}
else {
freeexp(fs, e2);
freeexp(fs, e1);
}
e1->u.info = luaK_codeABC(fs, op, 0, o1, o2);
e1->k = VRELOCABLE;
luaK_fixline(fs, line);
}
}
static void codecomp (FuncState *fs, OpCode op, int cond, expdesc *e1,
expdesc *e2) {
int o1 = luaK_exp2RK(fs, e1);
int o2 = luaK_exp2RK(fs, e2);
freeexp(fs, e2);
freeexp(fs, e1);
if (cond == 0 && op != OP_EQ) {
int temp; /* exchange args to replace by `<' or `<=' */
temp = o1; o1 = o2; o2 = temp; /* o1 <==> o2 */
cond = 1;
}
e1->u.info = condjump(fs, op, cond, o1, o2);
e1->k = VJMP;
}
void luaK_prefix (FuncState *fs, UnOpr op, expdesc *e, int line) {
expdesc e2;
e2.t = e2.f = NO_JUMP; e2.k = VKNUM; e2.u.nval = 0;
switch (op) {
case OPR_MINUS: {
if (isnumeral(e)) /* minus constant? */
e->u.nval = luai_numunm(NULL, e->u.nval); /* fold it */
else {
luaK_exp2anyreg(fs, e);
codearith(fs, OP_UNM, e, &e2, line);
}
break;
}
case OPR_NOT: codenot(fs, e); break;
case OPR_LEN: {
luaK_exp2anyreg(fs, e); /* cannot operate on constants */
codearith(fs, OP_LEN, e, &e2, line);
break;
}
default: lua_assert(0);
}
}
void luaK_infix (FuncState *fs, BinOpr op, expdesc *v) {
switch (op) {
case OPR_AND: {
luaK_goiftrue(fs, v);
break;
}
case OPR_OR: {
luaK_goiffalse(fs, v);
break;
}
case OPR_CONCAT: {
luaK_exp2nextreg(fs, v); /* operand must be on the `stack' */
break;
}
case OPR_ADD: case OPR_SUB: case OPR_MUL: case OPR_DIV:
case OPR_MOD: case OPR_POW: {
if (!isnumeral(v)) luaK_exp2RK(fs, v);
break;
}
default: {
luaK_exp2RK(fs, v);
break;
}
}
}
void luaK_posfix (FuncState *fs, BinOpr op,
expdesc *e1, expdesc *e2, int line) {
switch (op) {
case OPR_AND: {
lua_assert(e1->t == NO_JUMP); /* list must be closed */
luaK_dischargevars(fs, e2);
luaK_concat(fs, &e2->f, e1->f);
*e1 = *e2;
break;
}
case OPR_OR: {
lua_assert(e1->f == NO_JUMP); /* list must be closed */
luaK_dischargevars(fs, e2);
luaK_concat(fs, &e2->t, e1->t);
*e1 = *e2;
break;
}
case OPR_CONCAT: {
luaK_exp2val(fs, e2);
if (e2->k == VRELOCABLE && GET_OPCODE(getcode(fs, e2)) == OP_CONCAT) {
lua_assert(e1->u.info == GETARG_B(getcode(fs, e2))-1);
freeexp(fs, e1);
SETARG_B(getcode(fs, e2), e1->u.info);
e1->k = VRELOCABLE; e1->u.info = e2->u.info;
}
else {
luaK_exp2nextreg(fs, e2); /* operand must be on the 'stack' */
codearith(fs, OP_CONCAT, e1, e2, line);
}
break;
}
case OPR_ADD: case OPR_SUB: case OPR_MUL: case OPR_DIV:
case OPR_MOD: case OPR_POW: {
codearith(fs, cast(OpCode, op - OPR_ADD + OP_ADD), e1, e2, line);
break;
}
case OPR_EQ: case OPR_LT: case OPR_LE: {
codecomp(fs, cast(OpCode, op - OPR_EQ + OP_EQ), 1, e1, e2);
break;
}
case OPR_NE: case OPR_GT: case OPR_GE: {
codecomp(fs, cast(OpCode, op - OPR_NE + OP_EQ), 0, e1, e2);
break;
}
default: lua_assert(0);
}
}
void luaK_fixline (FuncState *fs, int line) {
fs->f->lineinfo[fs->pc - 1] = line;
}
void luaK_setlist (FuncState *fs, int base, int nelems, int tostore) {
int c = (nelems - 1)/LFIELDS_PER_FLUSH + 1;
int b = (tostore == LUA_MULTRET) ? 0 : tostore;
lua_assert(tostore != 0);
if (c <= MAXARG_C)
luaK_codeABC(fs, OP_SETLIST, base, b, c);
else if (c <= MAXARG_Ax) {
luaK_codeABC(fs, OP_SETLIST, base, b, 0);
codeextraarg(fs, c);
}
else
luaX_syntaxerror(fs->ls, "constructor too long");
fs->freereg = base + 1; /* free registers with list values */
}
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/lua/lgc.c b/sys/contrib/openzfs/module/lua/lgc.c
index 55feb24119d3..227ad723a0b8 100644
--- a/sys/contrib/openzfs/module/lua/lgc.c
+++ b/sys/contrib/openzfs/module/lua/lgc.c
@@ -1,1218 +1,1218 @@
/* BEGIN CSTYLED */
/*
** $Id: lgc.c,v 2.140.1.3 2014/09/01 16:55:08 roberto Exp $
** Garbage Collector
** See Copyright Notice in lua.h
*/
#define lgc_c
#define LUA_CORE
#include <sys/lua/lua.h>
#include "ldebug.h"
#include "ldo.h"
#include "lfunc.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "ltm.h"
/*
** cost of sweeping one element (the size of a small object divided
** by some adjustment for the sweep speed)
*/
#define GCSWEEPCOST ((sizeof(TString) + 4) / 4)
/* maximum number of elements to sweep in each single step */
#define GCSWEEPMAX (cast_int((GCSTEPSIZE / GCSWEEPCOST) / 4))
/* maximum number of finalizers to call in each GC step */
#define GCFINALIZENUM 4
/*
** macro to adjust 'stepmul': 'stepmul' is actually used like
** 'stepmul / STEPMULADJ' (value chosen by tests)
*/
#define STEPMULADJ 200
/*
** macro to adjust 'pause': 'pause' is actually used like
** 'pause / PAUSEADJ' (value chosen by tests)
*/
#define PAUSEADJ 100
/*
** 'makewhite' erases all color bits plus the old bit and then
** sets only the current white bit
*/
#define maskcolors (~(bit2mask(BLACKBIT, OLDBIT) | WHITEBITS))
#define makewhite(g,x) \
(gch(x)->marked = cast_byte((gch(x)->marked & maskcolors) | luaC_white(g)))
#define white2gray(x) resetbits(gch(x)->marked, WHITEBITS)
#define black2gray(x) resetbit(gch(x)->marked, BLACKBIT)
#define isfinalized(x) testbit(gch(x)->marked, FINALIZEDBIT)
#define checkdeadkey(n) lua_assert(!ttisdeadkey(gkey(n)) || ttisnil(gval(n)))
#define checkconsistency(obj) \
lua_longassert(!iscollectable(obj) || righttt(obj))
#define markvalue(g,o) { checkconsistency(o); \
if (valiswhite(o)) reallymarkobject(g,gcvalue(o)); }
#define markobject(g,t) { if ((t) && iswhite(obj2gco(t))) \
reallymarkobject(g, obj2gco(t)); }
static void reallymarkobject (global_State *g, GCObject *o);
/*
** {======================================================
** Generic functions
** =======================================================
*/
/*
** one after last element in a hash array
*/
#define gnodelast(h) gnode(h, cast(size_t, sizenode(h)))
/*
** link table 'h' into list pointed by 'p'
*/
#define linktable(h,p) ((h)->gclist = *(p), *(p) = obj2gco(h))
/*
** if key is not marked, mark its entry as dead (therefore removing it
** from the table)
*/
static void removeentry (Node *n) {
lua_assert(ttisnil(gval(n)));
if (valiswhite(gkey(n)))
setdeadvalue(gkey(n)); /* unused and unmarked key; remove it */
}
/*
** tells whether a key or value can be cleared from a weak
** table. Non-collectable objects are never removed from weak
** tables. Strings behave as `values', so they are never removed either. For
** other objects: if really collected, they cannot be kept; for objects
** being finalized, keep them in keys, but not in values
*/
static int iscleared (global_State *g, const TValue *o) {
if (!iscollectable(o)) return 0;
else if (ttisstring(o)) {
markobject(g, rawtsvalue(o)); /* strings are `values', so are never weak */
return 0;
}
else return iswhite(gcvalue(o));
}
/*
** barrier that moves the collector forward, that is, marks the white object
** pointed to by a black object.
*/
void luaC_barrier_ (lua_State *L, GCObject *o, GCObject *v) {
global_State *g = G(L);
lua_assert(isblack(o) && iswhite(v) && !isdead(g, v) && !isdead(g, o));
lua_assert(g->gcstate != GCSpause);
lua_assert(gch(o)->tt != LUA_TTABLE);
if (keepinvariantout(g)) /* must keep invariant? */
reallymarkobject(g, v); /* restore invariant */
else { /* sweep phase */
lua_assert(issweepphase(g));
makewhite(g, o); /* mark main obj. as white to avoid other barriers */
}
}
/*
** barrier that moves the collector backward, that is, marks the black object
** pointing to a white object as gray again. (The current implementation
** only works for tables; access to 'gclist' is not uniform across
** different types.)
*/
void luaC_barrierback_ (lua_State *L, GCObject *o) {
global_State *g = G(L);
lua_assert(isblack(o) && !isdead(g, o) && gch(o)->tt == LUA_TTABLE);
black2gray(o); /* make object gray (again) */
gco2t(o)->gclist = g->grayagain;
g->grayagain = o;
}
/*
** barrier for prototypes. When creating first closure (cache is
** NULL), use a forward barrier; this may be the only closure of the
** prototype (if it is a "regular" function, with a single instance)
** and the prototype may be big, so it is better to avoid traversing
** it again. Otherwise, use a backward barrier, to avoid marking all
** possible instances.
*/
LUAI_FUNC void luaC_barrierproto_ (lua_State *L, Proto *p, Closure *c) {
global_State *g = G(L);
lua_assert(isblack(obj2gco(p)));
if (p->cache == NULL) { /* first time? */
luaC_objbarrier(L, p, c);
}
else { /* use a backward barrier */
black2gray(obj2gco(p)); /* make prototype gray (again) */
p->gclist = g->grayagain;
g->grayagain = obj2gco(p);
}
}
/*
** check color (and invariants) for an upvalue that was closed,
** i.e., moved into the 'allgc' list
*/
void luaC_checkupvalcolor (global_State *g, UpVal *uv) {
GCObject *o = obj2gco(uv);
lua_assert(!isblack(o)); /* open upvalues are never black */
if (isgray(o)) {
if (keepinvariant(g)) {
resetoldbit(o); /* see MOVE OLD rule */
gray2black(o); /* it is being visited now */
markvalue(g, uv->v);
}
else {
lua_assert(issweepphase(g));
makewhite(g, o);
}
}
}
/*
** create a new collectable object (with given type and size) and link
** it to '*list'. 'offset' tells how many bytes to allocate before the
** object itself (used only by states).
*/
GCObject *luaC_newobj (lua_State *L, int tt, size_t sz, GCObject **list,
int offset) {
global_State *g = G(L);
char *raw = cast(char *, luaM_newobject(L, novariant(tt), sz));
GCObject *o = obj2gco(raw + offset);
if (list == NULL)
list = &g->allgc; /* standard list for collectable objects */
gch(o)->marked = luaC_white(g);
gch(o)->tt = tt;
gch(o)->next = *list;
*list = o;
return o;
}
/* }====================================================== */
/*
** {======================================================
** Mark functions
** =======================================================
*/
/*
** mark an object. Userdata, strings, and closed upvalues are visited
** and turned black here. Other objects are marked gray and added
** to appropriate list to be visited (and turned black) later. (Open
** upvalues are already linked in 'headuv' list.)
*/
static void reallymarkobject (global_State *g, GCObject *o) {
lu_mem size;
white2gray(o);
switch (gch(o)->tt) {
case LUA_TSHRSTR:
case LUA_TLNGSTR: {
size = sizestring(gco2ts(o));
break; /* nothing else to mark; make it black */
}
case LUA_TUSERDATA: {
Table *mt = gco2u(o)->metatable;
markobject(g, mt);
markobject(g, gco2u(o)->env);
size = sizeudata(gco2u(o));
break;
}
case LUA_TUPVAL: {
UpVal *uv = gco2uv(o);
markvalue(g, uv->v);
if (uv->v != &uv->u.value) /* open? */
return; /* open upvalues remain gray */
size = sizeof(UpVal);
break;
}
case LUA_TLCL: {
gco2lcl(o)->gclist = g->gray;
g->gray = o;
return;
}
case LUA_TCCL: {
gco2ccl(o)->gclist = g->gray;
g->gray = o;
return;
}
case LUA_TTABLE: {
linktable(gco2t(o), &g->gray);
return;
}
case LUA_TTHREAD: {
gco2th(o)->gclist = g->gray;
g->gray = o;
return;
}
case LUA_TPROTO: {
gco2p(o)->gclist = g->gray;
g->gray = o;
return;
}
default: lua_assert(0); return;
}
gray2black(o);
g->GCmemtrav += size;
}
/*
** mark metamethods for basic types
*/
static void markmt (global_State *g) {
int i;
for (i=0; i < LUA_NUMTAGS; i++)
markobject(g, g->mt[i]);
}
/*
** mark all objects in list of being-finalized
*/
static void markbeingfnz (global_State *g) {
GCObject *o;
for (o = g->tobefnz; o != NULL; o = gch(o)->next) {
makewhite(g, o);
reallymarkobject(g, o);
}
}
/*
** mark all values stored in marked open upvalues. (See comment in
** 'lstate.h'.)
*/
static void remarkupvals (global_State *g) {
UpVal *uv;
for (uv = g->uvhead.u.l.next; uv != &g->uvhead; uv = uv->u.l.next) {
if (isgray(obj2gco(uv)))
markvalue(g, uv->v);
}
}
/*
** mark root set and reset all gray lists, to start a new
** incremental (or full) collection
*/
static void restartcollection (global_State *g) {
g->gray = g->grayagain = NULL;
g->weak = g->allweak = g->ephemeron = NULL;
markobject(g, g->mainthread);
markvalue(g, &g->l_registry);
markmt(g);
markbeingfnz(g); /* mark any finalizing object left from previous cycle */
}
/* }====================================================== */
/*
** {======================================================
** Traverse functions
** =======================================================
*/
static void traverseweakvalue (global_State *g, Table *h) {
Node *n, *limit = gnodelast(h);
/* if there is an array part, assume it may have white values (do not
traverse it just to check) */
int hasclears = (h->sizearray > 0);
for (n = gnode(h, 0); n < limit; n++) {
checkdeadkey(n);
if (ttisnil(gval(n))) /* entry is empty? */
removeentry(n); /* remove it */
else {
lua_assert(!ttisnil(gkey(n)));
markvalue(g, gkey(n)); /* mark key */
if (!hasclears && iscleared(g, gval(n))) /* is there a white value? */
hasclears = 1; /* table will have to be cleared */
}
}
if (hasclears)
linktable(h, &g->weak); /* has to be cleared later */
else /* no white values */
linktable(h, &g->grayagain); /* no need to clean */
}
static int traverseephemeron (global_State *g, Table *h) {
int marked = 0; /* true if an object is marked in this traversal */
int hasclears = 0; /* true if table has white keys */
int prop = 0; /* true if table has entry "white-key -> white-value" */
Node *n, *limit = gnodelast(h);
int i;
/* traverse array part (numeric keys are 'strong') */
for (i = 0; i < h->sizearray; i++) {
if (valiswhite(&h->array[i])) {
marked = 1;
reallymarkobject(g, gcvalue(&h->array[i]));
}
}
/* traverse hash part */
for (n = gnode(h, 0); n < limit; n++) {
checkdeadkey(n);
if (ttisnil(gval(n))) /* entry is empty? */
removeentry(n); /* remove it */
else if (iscleared(g, gkey(n))) { /* key is not marked (yet)? */
hasclears = 1; /* table must be cleared */
if (valiswhite(gval(n))) /* value not marked yet? */
prop = 1; /* must propagate again */
}
else if (valiswhite(gval(n))) { /* value not marked yet? */
marked = 1;
reallymarkobject(g, gcvalue(gval(n))); /* mark it now */
}
}
if (g->gcstate != GCSatomic || prop)
linktable(h, &g->ephemeron); /* have to propagate again */
else if (hasclears) /* does table have white keys? */
linktable(h, &g->allweak); /* may have to clean white keys */
else /* no white keys */
linktable(h, &g->grayagain); /* no need to clean */
return marked;
}
static void traversestrongtable (global_State *g, Table *h) {
Node *n, *limit = gnodelast(h);
int i;
for (i = 0; i < h->sizearray; i++) /* traverse array part */
markvalue(g, &h->array[i]);
for (n = gnode(h, 0); n < limit; n++) { /* traverse hash part */
checkdeadkey(n);
if (ttisnil(gval(n))) /* entry is empty? */
removeentry(n); /* remove it */
else {
lua_assert(!ttisnil(gkey(n)));
markvalue(g, gkey(n)); /* mark key */
markvalue(g, gval(n)); /* mark value */
}
}
}
static lu_mem traversetable (global_State *g, Table *h) {
const char *weakkey, *weakvalue;
const TValue *mode = gfasttm(g, h->metatable, TM_MODE);
markobject(g, h->metatable);
if (mode && ttisstring(mode) && /* is there a weak mode? */
((weakkey = strchr(svalue(mode), 'k')),
(weakvalue = strchr(svalue(mode), 'v')),
(weakkey || weakvalue))) { /* is really weak? */
black2gray(obj2gco(h)); /* keep table gray */
if (!weakkey) /* strong keys? */
traverseweakvalue(g, h);
else if (!weakvalue) /* strong values? */
traverseephemeron(g, h);
else /* all weak */
linktable(h, &g->allweak); /* nothing to traverse now */
}
else /* not weak */
traversestrongtable(g, h);
return sizeof(Table) + sizeof(TValue) * h->sizearray +
sizeof(Node) * cast(size_t, sizenode(h));
}
static int traverseproto (global_State *g, Proto *f) {
int i;
if (f->cache && iswhite(obj2gco(f->cache)))
f->cache = NULL; /* allow cache to be collected */
markobject(g, f->source);
for (i = 0; i < f->sizek; i++) /* mark literals */
markvalue(g, &f->k[i]);
for (i = 0; i < f->sizeupvalues; i++) /* mark upvalue names */
markobject(g, f->upvalues[i].name);
for (i = 0; i < f->sizep; i++) /* mark nested protos */
markobject(g, f->p[i]);
for (i = 0; i < f->sizelocvars; i++) /* mark local-variable names */
markobject(g, f->locvars[i].varname);
return sizeof(Proto) + sizeof(Instruction) * f->sizecode +
sizeof(Proto *) * f->sizep +
sizeof(TValue) * f->sizek +
sizeof(int) * f->sizelineinfo +
sizeof(LocVar) * f->sizelocvars +
sizeof(Upvaldesc) * f->sizeupvalues;
}
static lu_mem traverseCclosure (global_State *g, CClosure *cl) {
int i;
for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */
markvalue(g, &cl->upvalue[i]);
return sizeCclosure(cl->nupvalues);
}
static lu_mem traverseLclosure (global_State *g, LClosure *cl) {
int i;
markobject(g, cl->p); /* mark its prototype */
for (i = 0; i < cl->nupvalues; i++) /* mark its upvalues */
markobject(g, cl->upvals[i]);
return sizeLclosure(cl->nupvalues);
}
static lu_mem traversestack (global_State *g, lua_State *th) {
int n = 0;
StkId o = th->stack;
if (o == NULL)
return 1; /* stack not completely built yet */
for (; o < th->top; o++) /* mark live elements in the stack */
markvalue(g, o);
if (g->gcstate == GCSatomic) { /* final traversal? */
StkId lim = th->stack + th->stacksize; /* real end of stack */
for (; o < lim; o++) /* clear not-marked stack slice */
setnilvalue(o);
}
else { /* count call infos to compute size */
CallInfo *ci;
for (ci = &th->base_ci; ci != th->ci; ci = ci->next)
n++;
}
return sizeof(lua_State) + sizeof(TValue) * th->stacksize +
sizeof(CallInfo) * n;
}
/*
** traverse one gray object, turning it to black (except for threads,
** which are always gray).
*/
static void propagatemark (global_State *g) {
lu_mem size;
GCObject *o = g->gray;
lua_assert(isgray(o));
gray2black(o);
switch (gch(o)->tt) {
case LUA_TTABLE: {
Table *h = gco2t(o);
g->gray = h->gclist; /* remove from 'gray' list */
size = traversetable(g, h);
break;
}
case LUA_TLCL: {
LClosure *cl = gco2lcl(o);
g->gray = cl->gclist; /* remove from 'gray' list */
size = traverseLclosure(g, cl);
break;
}
case LUA_TCCL: {
CClosure *cl = gco2ccl(o);
g->gray = cl->gclist; /* remove from 'gray' list */
size = traverseCclosure(g, cl);
break;
}
case LUA_TTHREAD: {
lua_State *th = gco2th(o);
g->gray = th->gclist; /* remove from 'gray' list */
th->gclist = g->grayagain;
g->grayagain = o; /* insert into 'grayagain' list */
black2gray(o);
size = traversestack(g, th);
break;
}
case LUA_TPROTO: {
Proto *p = gco2p(o);
g->gray = p->gclist; /* remove from 'gray' list */
size = traverseproto(g, p);
break;
}
default: lua_assert(0); return;
}
g->GCmemtrav += size;
}
static void propagateall (global_State *g) {
while (g->gray) propagatemark(g);
}
static void propagatelist (global_State *g, GCObject *l) {
lua_assert(g->gray == NULL); /* no grays left */
g->gray = l;
propagateall(g); /* traverse all elements from 'l' */
}
/*
** retraverse all gray lists. Because tables may be reinserted in other
** lists when traversed, traverse the original lists to avoid traversing
** the same table twice (which is not wrong, but inefficient)
*/
static void retraversegrays (global_State *g) {
GCObject *weak = g->weak; /* save original lists */
GCObject *grayagain = g->grayagain;
GCObject *ephemeron = g->ephemeron;
g->weak = g->grayagain = g->ephemeron = NULL;
propagateall(g); /* traverse main gray list */
propagatelist(g, grayagain);
propagatelist(g, weak);
propagatelist(g, ephemeron);
}
static void convergeephemerons (global_State *g) {
int changed;
do {
GCObject *w;
GCObject *next = g->ephemeron; /* get ephemeron list */
g->ephemeron = NULL; /* tables will return to this list when traversed */
changed = 0;
while ((w = next) != NULL) {
next = gco2t(w)->gclist;
if (traverseephemeron(g, gco2t(w))) { /* traverse marked some value? */
propagateall(g); /* propagate changes */
changed = 1; /* will have to revisit all ephemeron tables */
}
}
} while (changed);
}
/* }====================================================== */
/*
** {======================================================
** Sweep Functions
** =======================================================
*/
/*
** clear entries with unmarked keys from all weaktables in list 'l' up
** to element 'f'
*/
static void clearkeys (global_State *g, GCObject *l, GCObject *f) {
for (; l != f; l = gco2t(l)->gclist) {
Table *h = gco2t(l);
Node *n, *limit = gnodelast(h);
for (n = gnode(h, 0); n < limit; n++) {
if (!ttisnil(gval(n)) && (iscleared(g, gkey(n)))) {
setnilvalue(gval(n)); /* remove value ... */
removeentry(n); /* and remove entry from table */
}
}
}
}
/*
** clear entries with unmarked values from all weaktables in list 'l' up
** to element 'f'
*/
static void clearvalues (global_State *g, GCObject *l, GCObject *f) {
for (; l != f; l = gco2t(l)->gclist) {
Table *h = gco2t(l);
Node *n, *limit = gnodelast(h);
int i;
for (i = 0; i < h->sizearray; i++) {
TValue *o = &h->array[i];
if (iscleared(g, o)) /* value was collected? */
setnilvalue(o); /* remove value */
}
for (n = gnode(h, 0); n < limit; n++) {
if (!ttisnil(gval(n)) && iscleared(g, gval(n))) {
setnilvalue(gval(n)); /* remove value ... */
removeentry(n); /* and remove entry from table */
}
}
}
}
static void freeobj (lua_State *L, GCObject *o) {
switch (gch(o)->tt) {
case LUA_TPROTO: luaF_freeproto(L, gco2p(o)); break;
case LUA_TLCL: {
luaM_freemem(L, o, sizeLclosure(gco2lcl(o)->nupvalues));
break;
}
case LUA_TCCL: {
luaM_freemem(L, o, sizeCclosure(gco2ccl(o)->nupvalues));
break;
}
case LUA_TUPVAL: luaF_freeupval(L, gco2uv(o)); break;
case LUA_TTABLE: luaH_free(L, gco2t(o)); break;
case LUA_TTHREAD: luaE_freethread(L, gco2th(o)); break;
case LUA_TUSERDATA: luaM_freemem(L, o, sizeudata(gco2u(o))); break;
case LUA_TSHRSTR:
G(L)->strt.nuse--;
- /* FALLTHROUGH */
+ fallthrough;
case LUA_TLNGSTR: {
luaM_freemem(L, o, sizestring(gco2ts(o)));
break;
}
default: lua_assert(0);
}
}
#define sweepwholelist(L,p) sweeplist(L,p,MAX_LUMEM)
static GCObject **sweeplist (lua_State *L, GCObject **p, lu_mem count);
/*
** sweep the (open) upvalues of a thread and resize its stack and
** list of call-info structures.
*/
static void sweepthread (lua_State *L, lua_State *L1) {
if (L1->stack == NULL) return; /* stack not completely built yet */
sweepwholelist(L, &L1->openupval); /* sweep open upvalues */
luaE_freeCI(L1); /* free extra CallInfo slots */
/* should not change the stack during an emergency gc cycle */
if (G(L)->gckind != KGC_EMERGENCY)
luaD_shrinkstack(L1);
}
/*
** sweep at most 'count' elements from a list of GCObjects erasing dead
** objects, where a dead (not alive) object is one marked with the "old"
** (non current) white and not fixed.
** In non-generational mode, change all non-dead objects back to white,
** preparing for next collection cycle.
** In generational mode, keep black objects black, and also mark them as
** old; stop when hitting an old object, as all objects after that
** one will be old too.
** When object is a thread, sweep its list of open upvalues too.
*/
static GCObject **sweeplist (lua_State *L, GCObject **p, lu_mem count) {
global_State *g = G(L);
int ow = otherwhite(g);
int toclear, toset; /* bits to clear and to set in all live objects */
int tostop; /* stop sweep when this is true */
if (isgenerational(g)) { /* generational mode? */
toclear = ~0; /* clear nothing */
toset = bitmask(OLDBIT); /* set the old bit of all surviving objects */
tostop = bitmask(OLDBIT); /* do not sweep old generation */
}
else { /* normal mode */
toclear = maskcolors; /* clear all color bits + old bit */
toset = luaC_white(g); /* make object white */
tostop = 0; /* do not stop */
}
while (*p != NULL && count-- > 0) {
GCObject *curr = *p;
int marked = gch(curr)->marked;
if (isdeadm(ow, marked)) { /* is 'curr' dead? */
*p = gch(curr)->next; /* remove 'curr' from list */
freeobj(L, curr); /* erase 'curr' */
}
else {
if (testbits(marked, tostop))
return NULL; /* stop sweeping this list */
if (gch(curr)->tt == LUA_TTHREAD)
sweepthread(L, gco2th(curr)); /* sweep thread's upvalues */
/* update marks */
gch(curr)->marked = cast_byte((marked & toclear) | toset);
p = &gch(curr)->next; /* go to next element */
}
}
return (*p == NULL) ? NULL : p;
}
/*
** sweep a list until a live object (or end of list)
*/
static GCObject **sweeptolive (lua_State *L, GCObject **p, int *n) {
GCObject ** old = p;
int i = 0;
do {
i++;
p = sweeplist(L, p, 1);
} while (p == old);
if (n) *n += i;
return p;
}
/* }====================================================== */
/*
** {======================================================
** Finalization
** =======================================================
*/
static void checkSizes (lua_State *L) {
global_State *g = G(L);
if (g->gckind != KGC_EMERGENCY) { /* do not change sizes in emergency */
int hs = g->strt.size / 2; /* half the size of the string table */
if (g->strt.nuse < cast(lu_int32, hs)) /* using less than that half? */
luaS_resize(L, hs); /* halve its size */
luaZ_freebuffer(L, &g->buff); /* free concatenation buffer */
}
}
static GCObject *udata2finalize (global_State *g) {
GCObject *o = g->tobefnz; /* get first element */
lua_assert(isfinalized(o));
g->tobefnz = gch(o)->next; /* remove it from 'tobefnz' list */
gch(o)->next = g->allgc; /* return it to 'allgc' list */
g->allgc = o;
resetbit(gch(o)->marked, SEPARATED); /* mark that it is not in 'tobefnz' */
lua_assert(!isold(o)); /* see MOVE OLD rule */
if (!keepinvariantout(g)) /* not keeping invariant? */
makewhite(g, o); /* "sweep" object */
return o;
}
static void dothecall (lua_State *L, void *ud) {
UNUSED(ud);
luaD_call(L, L->top - 2, 0, 0);
}
static void GCTM (lua_State *L, int propagateerrors) {
global_State *g = G(L);
const TValue *tm;
TValue v;
setgcovalue(L, &v, udata2finalize(g));
tm = luaT_gettmbyobj(L, &v, TM_GC);
if (tm != NULL && ttisfunction(tm)) { /* is there a finalizer? */
int status;
lu_byte oldah = L->allowhook;
int running = g->gcrunning;
L->allowhook = 0; /* stop debug hooks during GC metamethod */
g->gcrunning = 0; /* avoid GC steps */
setobj2s(L, L->top, tm); /* push finalizer... */
setobj2s(L, L->top + 1, &v); /* ... and its argument */
L->top += 2; /* and (next line) call the finalizer */
status = luaD_pcall(L, dothecall, NULL, savestack(L, L->top - 2), 0);
L->allowhook = oldah; /* restore hooks */
g->gcrunning = running; /* restore state */
if (status != LUA_OK && propagateerrors) { /* error while running __gc? */
if (status == LUA_ERRRUN) { /* is there an error object? */
const char *msg = (ttisstring(L->top - 1))
? svalue(L->top - 1)
: "no message";
luaO_pushfstring(L, "error in __gc metamethod (%s)", msg);
status = LUA_ERRGCMM; /* error in __gc metamethod */
}
luaD_throw(L, status); /* re-throw error */
}
}
}
/*
** move all unreachable objects (or 'all' objects) that need
** finalization from list 'finobj' to list 'tobefnz' (to be finalized)
*/
static void separatetobefnz (lua_State *L, int all) {
global_State *g = G(L);
GCObject **p = &g->finobj;
GCObject *curr;
GCObject **lastnext = &g->tobefnz;
/* find last 'next' field in 'tobefnz' list (to add elements in its end) */
while (*lastnext != NULL)
lastnext = &gch(*lastnext)->next;
while ((curr = *p) != NULL) { /* traverse all finalizable objects */
lua_assert(!isfinalized(curr));
lua_assert(testbit(gch(curr)->marked, SEPARATED));
if (!(iswhite(curr) || all)) /* not being collected? */
p = &gch(curr)->next; /* don't bother with it */
else {
l_setbit(gch(curr)->marked, FINALIZEDBIT); /* won't be finalized again */
*p = gch(curr)->next; /* remove 'curr' from 'finobj' list */
gch(curr)->next = *lastnext; /* link at the end of 'tobefnz' list */
*lastnext = curr;
lastnext = &gch(curr)->next;
}
}
}
/*
** if object 'o' has a finalizer, remove it from 'allgc' list (must
** search the list to find it) and link it in 'finobj' list.
*/
void luaC_checkfinalizer (lua_State *L, GCObject *o, Table *mt) {
global_State *g = G(L);
if (testbit(gch(o)->marked, SEPARATED) || /* obj. is already separated... */
isfinalized(o) || /* ... or is finalized... */
gfasttm(g, mt, TM_GC) == NULL) /* or has no finalizer? */
return; /* nothing to be done */
else { /* move 'o' to 'finobj' list */
GCObject **p;
GCheader *ho = gch(o);
if (g->sweepgc == &ho->next) { /* avoid removing current sweep object */
lua_assert(issweepphase(g));
g->sweepgc = sweeptolive(L, g->sweepgc, NULL);
}
/* search for pointer pointing to 'o' */
for (p = &g->allgc; *p != o; p = &gch(*p)->next) { /* empty */ }
*p = ho->next; /* remove 'o' from root list */
ho->next = g->finobj; /* link it in list 'finobj' */
g->finobj = o;
l_setbit(ho->marked, SEPARATED); /* mark it as such */
if (!keepinvariantout(g)) /* not keeping invariant? */
makewhite(g, o); /* "sweep" object */
else
resetoldbit(o); /* see MOVE OLD rule */
}
}
/* }====================================================== */
/*
** {======================================================
** GC control
** =======================================================
*/
/*
** set a reasonable "time" to wait before starting a new GC cycle;
** cycle will start when memory use hits threshold
*/
static void setpause (global_State *g, l_mem estimate) {
l_mem debt, threshold;
estimate = estimate / PAUSEADJ; /* adjust 'estimate' */
threshold = (g->gcpause < MAX_LMEM / estimate) /* overflow? */
? estimate * g->gcpause /* no overflow */
: MAX_LMEM; /* overflow; truncate to maximum */
debt = -cast(l_mem, threshold - gettotalbytes(g));
luaE_setdebt(g, debt);
}
#define sweepphases \
(bitmask(GCSsweepstring) | bitmask(GCSsweepudata) | bitmask(GCSsweep))
/*
** enter first sweep phase (strings) and prepare pointers for other
** sweep phases. The calls to 'sweeptolive' make pointers point to an
** object inside the list (instead of to the header), so that the real
** sweep does not need to skip objects created between "now" and the start
** of the real sweep.
** Returns how many objects it swept.
*/
static int entersweep (lua_State *L) {
global_State *g = G(L);
int n = 0;
g->gcstate = GCSsweepstring;
lua_assert(g->sweepgc == NULL && g->sweepfin == NULL);
/* prepare to sweep strings, finalizable objects, and regular objects */
g->sweepstrgc = 0;
g->sweepfin = sweeptolive(L, &g->finobj, &n);
g->sweepgc = sweeptolive(L, &g->allgc, &n);
return n;
}
/*
** change GC mode
*/
void luaC_changemode (lua_State *L, int mode) {
global_State *g = G(L);
if (mode == g->gckind) return; /* nothing to change */
if (mode == KGC_GEN) { /* change to generational mode */
/* make sure gray lists are consistent */
luaC_runtilstate(L, bitmask(GCSpropagate));
g->GCestimate = gettotalbytes(g);
g->gckind = KGC_GEN;
}
else { /* change to incremental mode */
/* sweep all objects to turn them back to white
(as white has not changed, nothing extra will be collected) */
g->gckind = KGC_NORMAL;
entersweep(L);
luaC_runtilstate(L, ~sweepphases);
}
}
/*
** call all pending finalizers
*/
static void callallpendingfinalizers (lua_State *L, int propagateerrors) {
global_State *g = G(L);
while (g->tobefnz) {
resetoldbit(g->tobefnz);
GCTM(L, propagateerrors);
}
}
void luaC_freeallobjects (lua_State *L) {
global_State *g = G(L);
int i;
separatetobefnz(L, 1); /* separate all objects with finalizers */
lua_assert(g->finobj == NULL);
callallpendingfinalizers(L, 0);
g->currentwhite = WHITEBITS; /* this "white" makes all objects look dead */
g->gckind = KGC_NORMAL;
sweepwholelist(L, &g->finobj); /* finalizers can create objs. in 'finobj' */
sweepwholelist(L, &g->allgc);
for (i = 0; i < g->strt.size; i++) /* free all string lists */
sweepwholelist(L, &g->strt.hash[i]);
lua_assert(g->strt.nuse == 0);
}
static l_mem atomic (lua_State *L) {
global_State *g = G(L);
l_mem work = -cast(l_mem, g->GCmemtrav); /* start counting work */
GCObject *origweak, *origall;
lua_assert(!iswhite(obj2gco(g->mainthread)));
markobject(g, L); /* mark running thread */
/* registry and global metatables may be changed by API */
markvalue(g, &g->l_registry);
markmt(g); /* mark basic metatables */
/* remark occasional upvalues of (maybe) dead threads */
remarkupvals(g);
propagateall(g); /* propagate changes */
work += g->GCmemtrav; /* stop counting (do not (re)count grays) */
/* traverse objects caught by write barrier and by 'remarkupvals' */
retraversegrays(g);
work -= g->GCmemtrav; /* restart counting */
convergeephemerons(g);
/* at this point, all strongly accessible objects are marked. */
/* clear values from weak tables, before checking finalizers */
clearvalues(g, g->weak, NULL);
clearvalues(g, g->allweak, NULL);
origweak = g->weak; origall = g->allweak;
work += g->GCmemtrav; /* stop counting (objects being finalized) */
separatetobefnz(L, 0); /* separate objects to be finalized */
markbeingfnz(g); /* mark objects that will be finalized */
propagateall(g); /* remark, to propagate `preserveness' */
work -= g->GCmemtrav; /* restart counting */
convergeephemerons(g);
/* at this point, all resurrected objects are marked. */
/* remove dead objects from weak tables */
clearkeys(g, g->ephemeron, NULL); /* clear keys from all ephemeron tables */
clearkeys(g, g->allweak, NULL); /* clear keys from all allweak tables */
/* clear values from resurrected weak tables */
clearvalues(g, g->weak, origweak);
clearvalues(g, g->allweak, origall);
g->currentwhite = cast_byte(otherwhite(g)); /* flip current white */
work += g->GCmemtrav; /* complete counting */
return work; /* estimate of memory marked by 'atomic' */
}
static lu_mem singlestep (lua_State *L) {
global_State *g = G(L);
switch (g->gcstate) {
case GCSpause: {
/* start to count memory traversed */
g->GCmemtrav = g->strt.size * sizeof(GCObject*);
lua_assert(!isgenerational(g));
restartcollection(g);
g->gcstate = GCSpropagate;
return g->GCmemtrav;
}
case GCSpropagate: {
if (g->gray) {
lu_mem oldtrav = g->GCmemtrav;
propagatemark(g);
return g->GCmemtrav - oldtrav; /* memory traversed in this step */
}
else { /* no more `gray' objects */
lu_mem work;
int sw;
g->gcstate = GCSatomic; /* finish mark phase */
g->GCestimate = g->GCmemtrav; /* save what was counted */
work = atomic(L); /* add what was traversed by 'atomic' */
g->GCestimate += work; /* estimate of total memory traversed */
sw = entersweep(L);
return work + sw * GCSWEEPCOST;
}
}
case GCSsweepstring: {
int i;
for (i = 0; i < GCSWEEPMAX && g->sweepstrgc + i < g->strt.size; i++)
sweepwholelist(L, &g->strt.hash[g->sweepstrgc + i]);
g->sweepstrgc += i;
if (g->sweepstrgc >= g->strt.size) /* no more strings to sweep? */
g->gcstate = GCSsweepudata;
return i * GCSWEEPCOST;
}
case GCSsweepudata: {
if (g->sweepfin) {
g->sweepfin = sweeplist(L, g->sweepfin, GCSWEEPMAX);
return GCSWEEPMAX*GCSWEEPCOST;
}
else {
g->gcstate = GCSsweep;
return 0;
}
}
case GCSsweep: {
if (g->sweepgc) {
g->sweepgc = sweeplist(L, g->sweepgc, GCSWEEPMAX);
return GCSWEEPMAX*GCSWEEPCOST;
}
else {
/* sweep main thread */
GCObject *mt = obj2gco(g->mainthread);
sweeplist(L, &mt, 1);
checkSizes(L);
g->gcstate = GCSpause; /* finish collection */
return GCSWEEPCOST;
}
}
default: lua_assert(0); return 0;
}
}
/*
** advances the garbage collector until it reaches a state allowed
** by 'statesmask'
*/
void luaC_runtilstate (lua_State *L, int statesmask) {
global_State *g = G(L);
while (!testbit(statesmask, g->gcstate))
singlestep(L);
}
static void generationalcollection (lua_State *L) {
global_State *g = G(L);
lua_assert(g->gcstate == GCSpropagate);
if (g->GCestimate == 0) { /* signal for another major collection? */
luaC_fullgc(L, 0); /* perform a full regular collection */
g->GCestimate = gettotalbytes(g); /* update control */
}
else {
lu_mem estimate = g->GCestimate;
luaC_runtilstate(L, bitmask(GCSpause)); /* run complete (minor) cycle */
g->gcstate = GCSpropagate; /* skip restart */
if (gettotalbytes(g) > (estimate / 100) * g->gcmajorinc)
g->GCestimate = 0; /* signal for a major collection */
else
g->GCestimate = estimate; /* keep estimate from last major coll. */
}
setpause(g, gettotalbytes(g));
lua_assert(g->gcstate == GCSpropagate);
}
static void incstep (lua_State *L) {
global_State *g = G(L);
l_mem debt = g->GCdebt;
int stepmul = g->gcstepmul;
if (stepmul < 40) stepmul = 40; /* avoid ridiculously low values (and 0) */
/* convert debt from Kb to 'work units' (avoid zero debt and overflows) */
debt = (debt / STEPMULADJ) + 1;
debt = (debt < MAX_LMEM / stepmul) ? debt * stepmul : MAX_LMEM;
do { /* always perform at least one single step */
lu_mem work = singlestep(L); /* do some work */
debt -= work;
} while (debt > -GCSTEPSIZE && g->gcstate != GCSpause);
if (g->gcstate == GCSpause)
setpause(g, g->GCestimate); /* pause until next cycle */
else {
debt = (debt / stepmul) * STEPMULADJ; /* convert 'work units' to Kb */
luaE_setdebt(g, debt);
}
}
/*
** performs a basic GC step
*/
void luaC_forcestep (lua_State *L) {
global_State *g = G(L);
int i;
if (isgenerational(g)) generationalcollection(L);
else incstep(L);
/* run a few finalizers (or all of them at the end of a collect cycle) */
for (i = 0; g->tobefnz && (i < GCFINALIZENUM || g->gcstate == GCSpause); i++)
GCTM(L, 1); /* call one finalizer */
}
/*
** performs a basic GC step only if collector is running
*/
void luaC_step (lua_State *L) {
global_State *g = G(L);
if (g->gcrunning) luaC_forcestep(L);
else luaE_setdebt(g, -GCSTEPSIZE); /* avoid being called too often */
}
/*
** performs a full GC cycle; if "isemergency", does not call
** finalizers (which could change stack positions)
*/
void luaC_fullgc (lua_State *L, int isemergency) {
global_State *g = G(L);
int origkind = g->gckind;
lua_assert(origkind != KGC_EMERGENCY);
if (isemergency) /* do not run finalizers during emergency GC */
g->gckind = KGC_EMERGENCY;
else {
g->gckind = KGC_NORMAL;
callallpendingfinalizers(L, 1);
}
if (keepinvariant(g)) { /* may there be some black objects? */
/* must sweep all objects to turn them back to white
(as white has not changed, nothing will be collected) */
entersweep(L);
}
/* finish any pending sweep phase to start a new cycle */
luaC_runtilstate(L, bitmask(GCSpause));
luaC_runtilstate(L, ~bitmask(GCSpause)); /* start new collection */
luaC_runtilstate(L, bitmask(GCSpause)); /* run entire collection */
if (origkind == KGC_GEN) { /* generational mode? */
/* generational mode must be kept in propagate phase */
luaC_runtilstate(L, bitmask(GCSpropagate));
}
g->gckind = origkind;
setpause(g, gettotalbytes(g));
if (!isemergency) /* do not run finalizers during emergency GC */
callallpendingfinalizers(L, 1);
}
/* }====================================================== */
/* END CSTYLED */
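The only functional edits in the lgc.c hunk above (and likewise in the llex.c and lstrlib.c hunks below) replace the /* FALLTHROUGH */ comments with a fallthrough; statement, so that compilers which warn about implicit switch fallthrough see an explicit annotation instead of a comment. A small standalone sketch of the pattern follows, assuming a GCC/Clang-style attribute; the guard and definition shown here are hypothetical, since the tree supplies its own macro in a compatibility header.

#include <stdio.h>

/* Hypothetical definition for this sketch; OpenZFS defines its own. */
#if defined(__GNUC__) && __GNUC__ >= 7
#define fallthrough __attribute__((__fallthrough__))
#else
#define fallthrough ((void)0)
#endif

static const char *classify(int c)
{
	switch (c) {
	case 0:
		fallthrough;	/* deliberate: 0 and 1 share the same result */
	case 1:
		return "small";
	default:
		return "large";
	}
}

int main(void)
{
	printf("%s %s %s\n", classify(0), classify(1), classify(7));
	return 0;
}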
diff --git a/sys/contrib/openzfs/module/lua/llex.c b/sys/contrib/openzfs/module/lua/llex.c
index 0c3488a551f6..f2c9bf826c82 100644
--- a/sys/contrib/openzfs/module/lua/llex.c
+++ b/sys/contrib/openzfs/module/lua/llex.c
@@ -1,531 +1,531 @@
/* BEGIN CSTYLED */
/*
** $Id: llex.c,v 2.63.1.3 2015/02/09 17:56:34 roberto Exp $
** Lexical Analyzer
** See Copyright Notice in lua.h
*/
#define llex_c
#define LUA_CORE
#include <sys/lua/lua.h>
#include "lctype.h"
#include "ldo.h"
#include "llex.h"
#include "lobject.h"
#include "lparser.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "lzio.h"
#define next(ls) (ls->current = zgetc(ls->z))
#define currIsNewline(ls) (ls->current == '\n' || ls->current == '\r')
/* ORDER RESERVED */
static const char *const luaX_tokens [] = {
"and", "break", "do", "else", "elseif",
"end", "false", "for", "function", "goto", "if",
"in", "local", "nil", "not", "or", "repeat",
"return", "then", "true", "until", "while",
"..", "...", "==", ">=", "<=", "~=", "::", "<eof>",
"<number>", "<name>", "<string>"
};
#define save_and_next(ls) (save(ls, ls->current), next(ls))
static l_noret lexerror (LexState *ls, const char *msg, int token);
static void save (LexState *ls, int c) {
Mbuffer *b = ls->buff;
if (luaZ_bufflen(b) + 1 > luaZ_sizebuffer(b)) {
size_t newsize;
if (luaZ_sizebuffer(b) >= MAX_SIZET/2)
lexerror(ls, "lexical element too long", 0);
newsize = luaZ_sizebuffer(b) * 2;
luaZ_resizebuffer(ls->L, b, newsize);
}
b->buffer[luaZ_bufflen(b)++] = cast(char, c);
}
void luaX_init (lua_State *L) {
int i;
for (i=0; i<NUM_RESERVED; i++) {
TString *ts = luaS_new(L, luaX_tokens[i]);
luaS_fix(ts); /* reserved words are never collected */
ts->tsv.extra = cast_byte(i+1); /* reserved word */
}
}
const char *luaX_token2str (LexState *ls, int token) {
if (token < FIRST_RESERVED) { /* single-byte symbols? */
lua_assert(token == cast(unsigned char, token));
return (lisprint(token)) ? luaO_pushfstring(ls->L, LUA_QL("%c"), token) :
luaO_pushfstring(ls->L, "char(%d)", token);
}
else {
const char *s = luaX_tokens[token - FIRST_RESERVED];
if (token < TK_EOS) /* fixed format (symbols and reserved words)? */
return luaO_pushfstring(ls->L, LUA_QS, s);
else /* names, strings, and numerals */
return s;
}
}
static const char *txtToken (LexState *ls, int token) {
switch (token) {
case TK_NAME:
case TK_STRING:
case TK_NUMBER:
save(ls, '\0');
return luaO_pushfstring(ls->L, LUA_QS, luaZ_buffer(ls->buff));
default:
return luaX_token2str(ls, token);
}
}
static l_noret lexerror (LexState *ls, const char *msg, int token) {
char buff[LUA_IDSIZE];
luaO_chunkid(buff, getstr(ls->source), LUA_IDSIZE);
msg = luaO_pushfstring(ls->L, "%s:%d: %s", buff, ls->linenumber, msg);
if (token)
luaO_pushfstring(ls->L, "%s near %s", msg, txtToken(ls, token));
luaD_throw(ls->L, LUA_ERRSYNTAX);
}
l_noret luaX_syntaxerror (LexState *ls, const char *msg) {
lexerror(ls, msg, ls->t.token);
}
/*
** creates a new string and anchors it in function's table so that
** it will not be collected until the end of the function's compilation
** (by that time it should be anchored in function's prototype)
*/
TString *luaX_newstring (LexState *ls, const char *str, size_t l) {
lua_State *L = ls->L;
TValue *o; /* entry for `str' */
TString *ts = luaS_newlstr(L, str, l); /* create new string */
setsvalue2s(L, L->top++, ts); /* temporarily anchor it in stack */
o = luaH_set(L, ls->fs->h, L->top - 1);
if (ttisnil(o)) { /* not in use yet? (see 'addK') */
/* boolean value does not need GC barrier;
table has no metatable, so it does not need to invalidate cache */
setbvalue(o, 1); /* t[string] = true */
luaC_checkGC(L);
}
else { /* string already present */
ts = rawtsvalue(keyfromval(o)); /* re-use value previously stored */
}
L->top--; /* remove string from stack */
return ts;
}
/*
** increment the line number and skip the newline sequence (any of
** \n, \r, \n\r, or \r\n)
*/
static void inclinenumber (LexState *ls) {
int old = ls->current;
lua_assert(currIsNewline(ls));
next(ls); /* skip `\n' or `\r' */
if (currIsNewline(ls) && ls->current != old)
next(ls); /* skip `\n\r' or `\r\n' */
if (++ls->linenumber >= MAX_INT)
lexerror(ls, "chunk has too many lines", 0);
}
void luaX_setinput (lua_State *L, LexState *ls, ZIO *z, TString *source,
int firstchar) {
ls->decpoint = '.';
ls->L = L;
ls->current = firstchar;
ls->lookahead.token = TK_EOS; /* no look-ahead token */
ls->z = z;
ls->fs = NULL;
ls->linenumber = 1;
ls->lastline = 1;
ls->source = source;
ls->envn = luaS_new(L, LUA_ENV); /* create env name */
luaS_fix(ls->envn); /* never collect this name */
luaZ_resizebuffer(ls->L, ls->buff, LUA_MINBUFFER); /* initialize buffer */
}
/*
** =======================================================
** LEXICAL ANALYZER
** =======================================================
*/
static int check_next (LexState *ls, const char *set) {
if (ls->current == '\0' || !strchr(set, ls->current))
return 0;
save_and_next(ls);
return 1;
}
/*
** change all characters 'from' in buffer to 'to'
*/
static void buffreplace (LexState *ls, char from, char to) {
size_t n = luaZ_bufflen(ls->buff);
char *p = luaZ_buffer(ls->buff);
while (n--)
if (p[n] == from) p[n] = to;
}
#if !defined(getlocaledecpoint)
#define getlocaledecpoint() (localeconv()->decimal_point[0])
#endif
#define buff2d(b,e) luaO_str2d(luaZ_buffer(b), luaZ_bufflen(b) - 1, e)
/*
** in case of format error, try to change decimal point separator to
** the one defined in the current locale and check again
*/
static void trydecpoint (LexState *ls, SemInfo *seminfo) {
char old = ls->decpoint;
ls->decpoint = getlocaledecpoint();
buffreplace(ls, old, ls->decpoint); /* try new decimal separator */
if (!buff2d(ls->buff, &seminfo->r)) {
/* format error with correct decimal point: no more options */
buffreplace(ls, ls->decpoint, '.'); /* undo change (for error message) */
lexerror(ls, "malformed number", TK_NUMBER);
}
}
/* LUA_NUMBER */
/*
** this function is quite liberal in what it accepts, as 'luaO_str2d'
** will reject ill-formed numerals.
*/
static void read_numeral (LexState *ls, SemInfo *seminfo) {
const char *expo = "Ee";
int first = ls->current;
lua_assert(lisdigit(ls->current));
save_and_next(ls);
if (first == '0' && check_next(ls, "Xx")) /* hexadecimal? */
expo = "Pp";
for (;;) {
if (check_next(ls, expo)) /* exponent part? */
(void) check_next(ls, "+-"); /* optional exponent sign */
if (lisxdigit(ls->current) || ls->current == '.')
save_and_next(ls);
else break;
}
save(ls, '\0');
buffreplace(ls, '.', ls->decpoint); /* follow locale for decimal point */
if (!buff2d(ls->buff, &seminfo->r)) /* format error? */
trydecpoint(ls, seminfo); /* try to update decimal point separator */
}
/*
** skip a sequence '[=*[' or ']=*]' and return its number of '='s, or
** a negative value if the sequence is malformed
*/
static int skip_sep (LexState *ls) {
int count = 0;
int s = ls->current;
lua_assert(s == '[' || s == ']');
save_and_next(ls);
while (ls->current == '=') {
save_and_next(ls);
count++;
}
return (ls->current == s) ? count : (-count) - 1;
}
static void read_long_string (LexState *ls, SemInfo *seminfo, int sep) {
save_and_next(ls); /* skip 2nd `[' */
if (currIsNewline(ls)) /* string starts with a newline? */
inclinenumber(ls); /* skip it */
for (;;) {
switch (ls->current) {
case EOZ:
lexerror(ls, (seminfo) ? "unfinished long string" :
"unfinished long comment", TK_EOS);
break; /* to avoid warnings */
case ']': {
if (skip_sep(ls) == sep) {
save_and_next(ls); /* skip 2nd `]' */
goto endloop;
}
break;
}
case '\n': case '\r': {
save(ls, '\n');
inclinenumber(ls);
if (!seminfo) luaZ_resetbuffer(ls->buff); /* avoid wasting space */
break;
}
default: {
if (seminfo) save_and_next(ls);
else next(ls);
}
}
} endloop:
if (seminfo)
seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + (2 + sep),
luaZ_bufflen(ls->buff) - 2*(2 + sep));
}
static void escerror (LexState *ls, int *c, int n, const char *msg) {
int i;
luaZ_resetbuffer(ls->buff); /* prepare error message */
save(ls, '\\');
for (i = 0; i < n && c[i] != EOZ; i++)
save(ls, c[i]);
lexerror(ls, msg, TK_STRING);
}
static int readhexaesc (LexState *ls) {
int c[3], i; /* keep input for error message */
int r = 0; /* result accumulator */
c[0] = 'x'; /* for error message */
for (i = 1; i < 3; i++) { /* read two hexadecimal digits */
c[i] = next(ls);
if (!lisxdigit(c[i]))
escerror(ls, c, i + 1, "hexadecimal digit expected");
r = (r << 4) + luaO_hexavalue(c[i]);
}
return r;
}
static int readdecesc (LexState *ls) {
int c[3], i;
int r = 0; /* result accumulator */
for (i = 0; i < 3 && lisdigit(ls->current); i++) { /* read up to 3 digits */
c[i] = ls->current;
r = 10*r + c[i] - '0';
next(ls);
}
if (r > UCHAR_MAX)
escerror(ls, c, i, "decimal escape too large");
return r;
}
static void read_string (LexState *ls, int del, SemInfo *seminfo) {
save_and_next(ls); /* keep delimiter (for error messages) */
while (ls->current != del) {
switch (ls->current) {
case EOZ:
lexerror(ls, "unfinished string", TK_EOS);
break; /* to avoid warnings */
case '\n':
case '\r':
lexerror(ls, "unfinished string", TK_STRING);
break; /* to avoid warnings */
case '\\': { /* escape sequences */
int c; /* final character to be saved */
next(ls); /* do not save the `\' */
switch (ls->current) {
case 'a': c = '\a'; goto read_save;
case 'b': c = '\b'; goto read_save;
case 'f': c = '\f'; goto read_save;
case 'n': c = '\n'; goto read_save;
case 'r': c = '\r'; goto read_save;
case 't': c = '\t'; goto read_save;
case 'v': c = '\v'; goto read_save;
case 'x': c = readhexaesc(ls); goto read_save;
case '\n': case '\r':
inclinenumber(ls); c = '\n'; goto only_save;
case '\\': case '\"': case '\'':
c = ls->current; goto read_save;
case EOZ: goto no_save; /* will raise an error next loop */
case 'z': { /* zap following span of spaces */
next(ls); /* skip the 'z' */
while (lisspace(ls->current)) {
if (currIsNewline(ls)) inclinenumber(ls);
else next(ls);
}
goto no_save;
}
default: {
if (!lisdigit(ls->current))
escerror(ls, &ls->current, 1, "invalid escape sequence");
/* decimal escape \ddd */
c = readdecesc(ls);
goto only_save;
}
}
read_save: next(ls); /* read next character */
only_save: save(ls, c); /* save 'c' */
no_save: break;
}
default:
save_and_next(ls);
}
}
save_and_next(ls); /* skip delimiter */
seminfo->ts = luaX_newstring(ls, luaZ_buffer(ls->buff) + 1,
luaZ_bufflen(ls->buff) - 2);
}
static int llex (LexState *ls, SemInfo *seminfo) {
luaZ_resetbuffer(ls->buff);
for (;;) {
switch (ls->current) {
case '\n': case '\r': { /* line breaks */
inclinenumber(ls);
break;
}
case ' ': case '\f': case '\t': case '\v': { /* spaces */
next(ls);
break;
}
case '-': { /* '-' or '--' (comment) */
next(ls);
if (ls->current != '-') return '-';
/* else is a comment */
next(ls);
if (ls->current == '[') { /* long comment? */
int sep = skip_sep(ls);
luaZ_resetbuffer(ls->buff); /* `skip_sep' may dirty the buffer */
if (sep >= 0) {
read_long_string(ls, NULL, sep); /* skip long comment */
luaZ_resetbuffer(ls->buff); /* previous call may dirty the buff. */
break;
}
}
/* else short comment */
while (!currIsNewline(ls) && ls->current != EOZ)
next(ls); /* skip until end of line (or end of file) */
break;
}
case '[': { /* long string or simply '[' */
int sep = skip_sep(ls);
if (sep >= 0) {
read_long_string(ls, seminfo, sep);
return TK_STRING;
} else if (sep == -1) {
return '[';
} else {
lexerror(ls, "invalid long string delimiter", TK_STRING);
break;
}
}
case '=': {
next(ls);
if (ls->current != '=') return '=';
else { next(ls); return TK_EQ; }
}
case '<': {
next(ls);
if (ls->current != '=') return '<';
else { next(ls); return TK_LE; }
}
case '>': {
next(ls);
if (ls->current != '=') return '>';
else { next(ls); return TK_GE; }
}
case '~': {
next(ls);
if (ls->current != '=') return '~';
else { next(ls); return TK_NE; }
}
case ':': {
next(ls);
if (ls->current != ':') return ':';
else { next(ls); return TK_DBCOLON; }
}
case '"': case '\'': { /* short literal strings */
read_string(ls, ls->current, seminfo);
return TK_STRING;
}
case '.': { /* '.', '..', '...', or number */
save_and_next(ls);
if (check_next(ls, ".")) {
if (check_next(ls, "."))
return TK_DOTS; /* '...' */
else return TK_CONCAT; /* '..' */
}
else if (!lisdigit(ls->current)) return '.';
/* else go through */
}
- /* FALLTHROUGH */
+ fallthrough;
case '0': case '1': case '2': case '3': case '4':
case '5': case '6': case '7': case '8': case '9': {
read_numeral(ls, seminfo);
return TK_NUMBER;
}
case EOZ: {
return TK_EOS;
}
default: {
if (lislalpha(ls->current)) { /* identifier or reserved word? */
TString *ts;
do {
save_and_next(ls);
} while (lislalnum(ls->current));
ts = luaX_newstring(ls, luaZ_buffer(ls->buff),
luaZ_bufflen(ls->buff));
seminfo->ts = ts;
if (isreserved(ts)) /* reserved word? */
return ts->tsv.extra - 1 + FIRST_RESERVED;
else {
return TK_NAME;
}
}
else { /* single-char tokens (+ - / ...) */
int c = ls->current;
next(ls);
return c;
}
}
}
}
}
void luaX_next (LexState *ls) {
ls->lastline = ls->linenumber;
if (ls->lookahead.token != TK_EOS) { /* is there a look-ahead token? */
ls->t = ls->lookahead; /* use this one */
ls->lookahead.token = TK_EOS; /* and discharge it */
}
else
ls->t.token = llex(ls, &ls->t.seminfo); /* read next token */
}
int luaX_lookahead (LexState *ls) {
lua_assert(ls->lookahead.token == TK_EOS);
ls->lookahead.token = llex(ls, &ls->lookahead.seminfo);
return ls->lookahead.token;
}
/* END CSTYLED */
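In the llex.c hunk above, skip_sep() encodes the level of a long-bracket opener such as [==[ as the number of '=' characters between the brackets, and reports a malformed sequence with a negative result. Here is a standalone sketch of the same counting over a plain C string; count_long_bracket() is a hypothetical helper for illustration, whereas the real lexer reads one byte at a time from its input stream and handles the closing ]==] form with the same routine.

#include <stdio.h>

/*
 * Return the long-bracket level of an opener such as "[==[" (2 here),
 * or -1 if the string does not start with a well-formed opener.
 */
static int count_long_bracket(const char *s)
{
	int level = 0;

	if (*s++ != '[')
		return -1;
	while (*s == '=') {
		level++;
		s++;
	}
	return (*s == '[') ? level : -1;
}

int main(void)
{
	printf("[[   -> %d\n", count_long_bracket("[["));	/* 0 */
	printf("[==[ -> %d\n", count_long_bracket("[==["));	/* 2 */
	printf("[=?  -> %d\n", count_long_bracket("[=?"));	/* -1 */
	return 0;
}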
diff --git a/sys/contrib/openzfs/module/lua/lstrlib.c b/sys/contrib/openzfs/module/lua/lstrlib.c
index 12027757bf53..46e3d8fb35bb 100644
--- a/sys/contrib/openzfs/module/lua/lstrlib.c
+++ b/sys/contrib/openzfs/module/lua/lstrlib.c
@@ -1,1040 +1,1040 @@
/* BEGIN CSTYLED */
/*
** $Id: lstrlib.c,v 1.178.1.1 2013/04/12 18:48:47 roberto Exp $
** Standard library for string operations and pattern-matching
** See Copyright Notice in lua.h
*/
#define lstrlib_c
#define LUA_LIB
#include <sys/lua/lua.h>
#include <sys/lua/lauxlib.h>
#include <sys/lua/lualib.h>
/*
** maximum number of captures that a pattern can do during
** pattern-matching. This limit is arbitrary.
*/
#if !defined(LUA_MAXCAPTURES)
#define LUA_MAXCAPTURES 16
#endif
/* macro to `unsign' a character */
#define uchar(c) ((unsigned char)(c))
/*
* The provided version of sprintf returns a char *, but str_format expects
* it to return the number of characters printed. This version has the expected
* behavior.
*/
static size_t str_sprintf(char *buf, const char *fmt, ...) {
va_list args;
size_t len;
va_start(args, fmt);
len = vsnprintf(buf, INT_MAX, fmt, args);
va_end(args);
return len;
}
static int str_len (lua_State *L) {
size_t l;
luaL_checklstring(L, 1, &l);
lua_pushinteger(L, (lua_Integer)l);
return 1;
}
/* translate a relative string position: negative means back from end */
static size_t posrelat (ptrdiff_t pos, size_t len) {
if (pos >= 0) return (size_t)pos;
else if (0u - (size_t)pos > len) return 0;
else return len - ((size_t)-pos) + 1;
}
static int str_sub (lua_State *L) {
size_t l;
const char *s = luaL_checklstring(L, 1, &l);
size_t start = posrelat(luaL_checkinteger(L, 2), l);
size_t end = posrelat(luaL_optinteger(L, 3, -1), l);
if (start < 1) start = 1;
if (end > l) end = l;
if (start <= end)
lua_pushlstring(L, s + start - 1, end - start + 1);
else lua_pushliteral(L, "");
return 1;
}
static int str_reverse (lua_State *L) {
size_t l, i;
luaL_Buffer b;
const char *s = luaL_checklstring(L, 1, &l);
char *p = luaL_buffinitsize(L, &b, l);
for (i = 0; i < l; i++)
p[i] = s[l - i - 1];
luaL_pushresultsize(&b, l);
return 1;
}
static int str_lower (lua_State *L) {
size_t l;
size_t i;
luaL_Buffer b;
const char *s = luaL_checklstring(L, 1, &l);
char *p = luaL_buffinitsize(L, &b, l);
for (i=0; i<l; i++)
p[i] = tolower(uchar(s[i]));
luaL_pushresultsize(&b, l);
return 1;
}
static int str_upper (lua_State *L) {
size_t l;
size_t i;
luaL_Buffer b;
const char *s = luaL_checklstring(L, 1, &l);
char *p = luaL_buffinitsize(L, &b, l);
for (i=0; i<l; i++)
p[i] = toupper(uchar(s[i]));
luaL_pushresultsize(&b, l);
return 1;
}
/* reasonable limit to avoid arithmetic overflow */
#define MAXSIZE ((~(size_t)0) >> 1)
static int str_rep (lua_State *L) {
size_t l, lsep;
const char *s = luaL_checklstring(L, 1, &l);
int n = luaL_checkint(L, 2);
const char *sep = luaL_optlstring(L, 3, "", &lsep);
if (n <= 0) lua_pushliteral(L, "");
else if (l + lsep < l || l + lsep >= MAXSIZE / n) /* may overflow? */
return luaL_error(L, "resulting string too large");
else {
size_t totallen = n * l + (n - 1) * lsep;
luaL_Buffer b;
char *p = luaL_buffinitsize(L, &b, totallen);
while (n-- > 1) { /* first n-1 copies (followed by separator) */
memcpy(p, s, l * sizeof(char)); p += l;
if (lsep > 0) { /* avoid empty 'memcpy' (may be expensive) */
memcpy(p, sep, lsep * sizeof(char)); p += lsep;
}
}
memcpy(p, s, l * sizeof(char)); /* last copy (not followed by separator) */
luaL_pushresultsize(&b, totallen);
}
return 1;
}
static int str_byte (lua_State *L) {
size_t l;
const char *s = luaL_checklstring(L, 1, &l);
size_t posi = posrelat(luaL_optinteger(L, 2, 1), l);
size_t pose = posrelat(luaL_optinteger(L, 3, posi), l);
int n, i;
if (posi < 1) posi = 1;
if (pose > l) pose = l;
if (posi > pose) return 0; /* empty interval; return no values */
n = (int)(pose - posi + 1);
if (posi + n <= pose) /* (size_t -> int) overflow? */
return luaL_error(L, "string slice too long");
luaL_checkstack(L, n, "string slice too long");
for (i=0; i<n; i++)
lua_pushinteger(L, uchar(s[posi+i-1]));
return n;
}
static int str_char (lua_State *L) {
int n = lua_gettop(L); /* number of arguments */
int i;
luaL_Buffer b;
char *p = luaL_buffinitsize(L, &b, n);
for (i=1; i<=n; i++) {
int c = luaL_checkint(L, i);
luaL_argcheck(L, uchar(c) == c, i, "value out of range");
p[i - 1] = uchar(c);
}
luaL_pushresultsize(&b, n);
return 1;
}
#if defined(LUA_USE_DUMP)
static int writer (lua_State *L, const void* b, size_t size, void* B) {
(void)L;
luaL_addlstring((luaL_Buffer*) B, (const char *)b, size);
return 0;
}
static int str_dump (lua_State *L) {
luaL_Buffer b;
luaL_checktype(L, 1, LUA_TFUNCTION);
lua_settop(L, 1);
luaL_buffinit(L,&b);
if (lua_dump(L, writer, &b) != 0)
return luaL_error(L, "unable to dump given function");
luaL_pushresult(&b);
return 1;
}
#endif
/*
** {======================================================
** PATTERN MATCHING
** =======================================================
*/
#define CAP_UNFINISHED (-1)
#define CAP_POSITION (-2)
typedef struct MatchState {
int matchdepth; /* control for recursive depth (to avoid C stack overflow) */
const char *src_init; /* init of source string */
const char *src_end; /* end ('\0') of source string */
const char *p_end; /* end ('\0') of pattern */
lua_State *L;
int level; /* total number of captures (finished or unfinished) */
struct {
const char *init;
ptrdiff_t len;
} capture[LUA_MAXCAPTURES];
} MatchState;
/* recursive function */
static const char *match (MatchState *ms, const char *s, const char *p);
/* maximum recursion depth for 'match' */
#if !defined(MAXCCALLS)
#define MAXCCALLS 200
#endif
#define L_ESC '%'
#define SPECIALS "^$*+?.([%-"
static int check_capture (MatchState *ms, int l) {
l -= '1';
if (l < 0 || l >= ms->level || ms->capture[l].len == CAP_UNFINISHED)
return luaL_error(ms->L, "invalid capture index %%%d", l + 1);
return l;
}
static int capture_to_close (MatchState *ms) {
int level = ms->level;
for (level--; level>=0; level--)
if (ms->capture[level].len == CAP_UNFINISHED) return level;
return luaL_error(ms->L, "invalid pattern capture");
}
static const char *classend (MatchState *ms, const char *p) {
switch (*p++) {
case L_ESC: {
if (p == ms->p_end)
luaL_error(ms->L, "malformed pattern (ends with " LUA_QL("%%") ")");
return p+1;
}
case '[': {
if (*p == '^') p++;
do { /* look for a `]' */
if (p == ms->p_end)
luaL_error(ms->L, "malformed pattern (missing " LUA_QL("]") ")");
if (*(p++) == L_ESC && p < ms->p_end)
p++; /* skip escapes (e.g. `%]') */
} while (*p != ']');
return p+1;
}
default: {
return p;
}
}
}
static int match_class (int c, int cl) {
int res;
switch (tolower(cl)) {
case 'a' : res = isalpha(c); break;
case 'c' : res = iscntrl(c); break;
case 'd' : res = isdigit(c); break;
case 'g' : res = isgraph(c); break;
case 'l' : res = islower(c); break;
case 'p' : res = ispunct(c); break;
case 's' : res = isspace(c); break;
case 'u' : res = isupper(c); break;
case 'w' : res = isalnum(c); break;
case 'x' : res = isxdigit(c); break;
case 'z' : res = (c == 0); break; /* deprecated option */
default: return (cl == c);
}
return (islower(cl) ? res : !res);
}
static int matchbracketclass (int c, const char *p, const char *ec) {
int sig = 1;
if (*(p+1) == '^') {
sig = 0;
p++; /* skip the `^' */
}
while (++p < ec) {
if (*p == L_ESC) {
p++;
if (match_class(c, uchar(*p)))
return sig;
}
else if ((*(p+1) == '-') && (p+2 < ec)) {
p+=2;
if (uchar(*(p-2)) <= c && c <= uchar(*p))
return sig;
}
else if (uchar(*p) == c) return sig;
}
return !sig;
}
static int singlematch (MatchState *ms, const char *s, const char *p,
const char *ep) {
if (s >= ms->src_end)
return 0;
else {
int c = uchar(*s);
switch (*p) {
case '.': return 1; /* matches any char */
case L_ESC: return match_class(c, uchar(*(p+1)));
case '[': return matchbracketclass(c, p, ep-1);
default: return (uchar(*p) == c);
}
}
}
static const char *matchbalance (MatchState *ms, const char *s,
const char *p) {
if (p >= ms->p_end - 1)
luaL_error(ms->L, "malformed pattern "
"(missing arguments to " LUA_QL("%%b") ")");
if (*s != *p) return NULL;
else {
int b = *p;
int e = *(p+1);
int cont = 1;
while (++s < ms->src_end) {
if (*s == e) {
if (--cont == 0) return s+1;
}
else if (*s == b) cont++;
}
}
return NULL; /* string ends out of balance */
}
static const char *max_expand (MatchState *ms, const char *s,
const char *p, const char *ep) {
ptrdiff_t i = 0; /* counts maximum expand for item */
while (singlematch(ms, s + i, p, ep))
i++;
/* keeps trying to match with the maximum repetitions */
while (i>=0) {
const char *res = match(ms, (s+i), ep+1);
if (res) return res;
i--; /* else didn't match; reduce 1 repetition to try again */
}
return NULL;
}
static const char *min_expand (MatchState *ms, const char *s,
const char *p, const char *ep) {
for (;;) {
const char *res = match(ms, s, ep+1);
if (res != NULL)
return res;
else if (singlematch(ms, s, p, ep))
s++; /* try with one more repetition */
else return NULL;
}
}
static const char *start_capture (MatchState *ms, const char *s,
const char *p, int what) {
const char *res;
int level = ms->level;
if (level >= LUA_MAXCAPTURES) luaL_error(ms->L, "too many captures");
ms->capture[level].init = s;
ms->capture[level].len = what;
ms->level = level+1;
if ((res=match(ms, s, p)) == NULL) /* match failed? */
ms->level--; /* undo capture */
return res;
}
static const char *end_capture (MatchState *ms, const char *s,
const char *p) {
int l = capture_to_close(ms);
const char *res;
ms->capture[l].len = s - ms->capture[l].init; /* close capture */
if ((res = match(ms, s, p)) == NULL) /* match failed? */
ms->capture[l].len = CAP_UNFINISHED; /* undo capture */
return res;
}
static const char *match_capture (MatchState *ms, const char *s, int l) {
size_t len;
l = check_capture(ms, l);
len = ms->capture[l].len;
if ((size_t)(ms->src_end-s) >= len &&
memcmp(ms->capture[l].init, s, len) == 0)
return s+len;
else return NULL;
}
static const char *match (MatchState *ms, const char *s, const char *p) {
if (ms->matchdepth-- == 0)
luaL_error(ms->L, "pattern too complex");
init: /* using goto's to optimize tail recursion */
if (p != ms->p_end) { /* end of pattern? */
switch (*p) {
case '(': { /* start capture */
if (*(p + 1) == ')') /* position capture? */
s = start_capture(ms, s, p + 2, CAP_POSITION);
else
s = start_capture(ms, s, p + 1, CAP_UNFINISHED);
break;
}
case ')': { /* end capture */
s = end_capture(ms, s, p + 1);
break;
}
case '$': {
if ((p + 1) != ms->p_end) /* is the `$' the last char in pattern? */
goto dflt; /* no; go to default */
s = (s == ms->src_end) ? s : NULL; /* check end of string */
break;
}
case L_ESC: { /* escaped sequences not in the format class[*+?-]? */
switch (*(p + 1)) {
case 'b': { /* balanced string? */
s = matchbalance(ms, s, p + 2);
if (s != NULL) {
p += 4; goto init; /* return match(ms, s, p + 4); */
} /* else fail (s == NULL) */
break;
}
case 'f': { /* frontier? */
const char *ep; char previous;
p += 2;
if (*p != '[')
luaL_error(ms->L, "missing " LUA_QL("[") " after "
LUA_QL("%%f") " in pattern");
ep = classend(ms, p); /* points to what is next */
previous = (s == ms->src_init) ? '\0' : *(s - 1);
if (!matchbracketclass(uchar(previous), p, ep - 1) &&
matchbracketclass(uchar(*s), p, ep - 1)) {
p = ep; goto init; /* return match(ms, s, ep); */
}
s = NULL; /* match failed */
break;
}
case '0': case '1': case '2': case '3':
case '4': case '5': case '6': case '7':
case '8': case '9': { /* capture results (%0-%9)? */
s = match_capture(ms, s, uchar(*(p + 1)));
if (s != NULL) {
p += 2; goto init; /* return match(ms, s, p + 2) */
}
break;
}
default: goto dflt;
}
break;
}
default: dflt: { /* pattern class plus optional suffix */
const char *ep = classend(ms, p); /* points to optional suffix */
/* does not match at least once? */
if (!singlematch(ms, s, p, ep)) {
if (*ep == '*' || *ep == '?' || *ep == '-') { /* accept empty? */
p = ep + 1; goto init; /* return match(ms, s, ep + 1); */
}
else /* '+' or no suffix */
s = NULL; /* fail */
}
else { /* matched once */
switch (*ep) { /* handle optional suffix */
case '?': { /* optional */
const char *res;
if ((res = match(ms, s + 1, ep + 1)) != NULL)
s = res;
else {
p = ep + 1; goto init; /* else return match(ms, s, ep + 1); */
}
break;
}
case '+': /* 1 or more repetitions */
s++; /* 1 match already done */
- /* FALLTHROUGH */
+ fallthrough;
case '*': /* 0 or more repetitions */
s = max_expand(ms, s, p, ep);
break;
case '-': /* 0 or more repetitions (minimum) */
s = min_expand(ms, s, p, ep);
break;
default: /* no suffix */
s++; p = ep; goto init; /* return match(ms, s + 1, ep); */
}
}
break;
}
}
}
ms->matchdepth++;
return s;
}
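/*
** Worked example (illustration only, not from the original source): matching
** the pattern "a*b" against "aaab" reaches the default case above with p at
** 'a' and ep at the '*'.  Because the first 'a' matches, max_expand() is
** called; it counts three possible repetitions and backtracks from the
** longest one, so the first recursive attempt, match(ms, s+3, ep+1), already
** finds the trailing 'b' and the whole match succeeds without shrinking i.
*/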
static const char *lmemfind (const char *s1, size_t l1,
const char *s2, size_t l2) {
if (l2 == 0) return s1; /* empty strings are everywhere */
else if (l2 > l1) return NULL; /* avoids a negative `l1' */
else {
const char *init; /* to search for a `*s2' inside `s1' */
l2--; /* 1st char will be checked by `memchr' */
l1 = l1-l2; /* `s2' cannot be found after that */
while (l1 > 0 && (init = (const char *)memchr(s1, *s2, l1)) != NULL) {
init++; /* 1st char is already checked */
if (memcmp(init, s2+1, l2) == 0)
return init-1;
else { /* correct `l1' and `s1' to try again */
l1 -= init-s1;
s1 = init;
}
}
return NULL; /* not found */
}
}
static void push_onecapture (MatchState *ms, int i, const char *s,
const char *e) {
if (i >= ms->level) {
if (i == 0) /* ms->level == 0, too */
lua_pushlstring(ms->L, s, e - s); /* add whole match */
else
luaL_error(ms->L, "invalid capture index");
}
else {
ptrdiff_t l = ms->capture[i].len;
if (l == CAP_UNFINISHED) luaL_error(ms->L, "unfinished capture");
if (l == CAP_POSITION)
lua_pushinteger(ms->L, ms->capture[i].init - ms->src_init + 1);
else
lua_pushlstring(ms->L, ms->capture[i].init, l);
}
}
static int push_captures (MatchState *ms, const char *s, const char *e) {
int i;
int nlevels = (ms->level == 0 && s) ? 1 : ms->level;
luaL_checkstack(ms->L, nlevels, "too many captures");
for (i = 0; i < nlevels; i++)
push_onecapture(ms, i, s, e);
return nlevels; /* number of strings pushed */
}
/* check whether pattern has no special characters */
static int nospecials (const char *p, size_t l) {
size_t upto = 0;
do {
if (strpbrk(p + upto, SPECIALS))
return 0; /* pattern has a special character */
upto += strlen(p + upto) + 1; /* may have more after \0 */
} while (upto <= l);
return 1; /* no special chars found */
}
static int str_find_aux (lua_State *L, int find) {
size_t ls, lp;
const char *s = luaL_checklstring(L, 1, &ls);
const char *p = luaL_checklstring(L, 2, &lp);
size_t init = posrelat(luaL_optinteger(L, 3, 1), ls);
if (init < 1) init = 1;
else if (init > ls + 1) { /* start after string's end? */
lua_pushnil(L); /* cannot find anything */
return 1;
}
/* explicit request or no special characters? */
if (find && (lua_toboolean(L, 4) || nospecials(p, lp))) {
/* do a plain search */
const char *s2 = lmemfind(s + init - 1, ls - init + 1, p, lp);
if (s2) {
lua_pushinteger(L, s2 - s + 1);
lua_pushinteger(L, s2 - s + lp);
return 2;
}
}
else {
MatchState ms;
const char *s1 = s + init - 1;
int anchor = (*p == '^');
if (anchor) {
p++; lp--; /* skip anchor character */
}
ms.L = L;
ms.matchdepth = MAXCCALLS;
ms.src_init = s;
ms.src_end = s + ls;
ms.p_end = p + lp;
do {
const char *res;
ms.level = 0;
lua_assert(ms.matchdepth == MAXCCALLS);
if ((res=match(&ms, s1, p)) != NULL) {
if (find) {
lua_pushinteger(L, s1 - s + 1); /* start */
lua_pushinteger(L, res - s); /* end */
return push_captures(&ms, NULL, 0) + 2;
}
else
return push_captures(&ms, s1, res);
}
} while (s1++ < ms.src_end && !anchor);
}
lua_pushnil(L); /* not found */
return 1;
}
static int str_find (lua_State *L) {
return str_find_aux(L, 1);
}
static int str_match (lua_State *L) {
return str_find_aux(L, 0);
}
static int gmatch_aux (lua_State *L) {
MatchState ms;
size_t ls, lp;
const char *s = lua_tolstring(L, lua_upvalueindex(1), &ls);
const char *p = lua_tolstring(L, lua_upvalueindex(2), &lp);
const char *src;
ms.L = L;
ms.matchdepth = MAXCCALLS;
ms.src_init = s;
ms.src_end = s+ls;
ms.p_end = p + lp;
for (src = s + (size_t)lua_tointeger(L, lua_upvalueindex(3));
src <= ms.src_end;
src++) {
const char *e;
ms.level = 0;
lua_assert(ms.matchdepth == MAXCCALLS);
if ((e = match(&ms, src, p)) != NULL) {
lua_Integer newstart = e-s;
if (e == src) newstart++; /* empty match? go at least one position */
lua_pushinteger(L, newstart);
lua_replace(L, lua_upvalueindex(3));
return push_captures(&ms, src, e);
}
}
return 0; /* not found */
}
static int str_gmatch (lua_State *L) {
luaL_checkstring(L, 1);
luaL_checkstring(L, 2);
lua_settop(L, 2);
lua_pushinteger(L, 0);
lua_pushcclosure(L, gmatch_aux, 3);
return 1;
}
static void add_s (MatchState *ms, luaL_Buffer *b, const char *s,
const char *e) {
size_t l, i;
const char *news = lua_tolstring(ms->L, 3, &l);
for (i = 0; i < l; i++) {
if (news[i] != L_ESC)
luaL_addchar(b, news[i]);
else {
i++; /* skip ESC */
if (!isdigit(uchar(news[i]))) {
if (news[i] != L_ESC)
luaL_error(ms->L, "invalid use of " LUA_QL("%c")
" in replacement string", L_ESC);
luaL_addchar(b, news[i]);
}
else if (news[i] == '0')
luaL_addlstring(b, s, e - s);
else {
push_onecapture(ms, news[i] - '1', s, e);
luaL_addvalue(b); /* add capture to accumulated result */
}
}
}
}
static void add_value (MatchState *ms, luaL_Buffer *b, const char *s,
const char *e, int tr) {
lua_State *L = ms->L;
switch (tr) {
case LUA_TFUNCTION: {
int n;
lua_pushvalue(L, 3);
n = push_captures(ms, s, e);
lua_call(L, n, 1);
break;
}
case LUA_TTABLE: {
push_onecapture(ms, 0, s, e);
lua_gettable(L, 3);
break;
}
default: { /* LUA_TNUMBER or LUA_TSTRING */
add_s(ms, b, s, e);
return;
}
}
if (!lua_toboolean(L, -1)) { /* nil or false? */
lua_pop(L, 1);
lua_pushlstring(L, s, e - s); /* keep original text */
}
else if (!lua_isstring(L, -1))
luaL_error(L, "invalid replacement value (a %s)", luaL_typename(L, -1));
luaL_addvalue(b); /* add result to accumulator */
}
static int str_gsub (lua_State *L) {
size_t srcl, lp;
const char *src = luaL_checklstring(L, 1, &srcl);
const char *p = luaL_checklstring(L, 2, &lp);
int tr = lua_type(L, 3);
size_t max_s = luaL_optinteger(L, 4, srcl+1);
int anchor = (*p == '^');
size_t n = 0;
MatchState ms;
luaL_Buffer b;
luaL_argcheck(L, tr == LUA_TNUMBER || tr == LUA_TSTRING ||
tr == LUA_TFUNCTION || tr == LUA_TTABLE, 3,
"string/function/table expected");
luaL_buffinit(L, &b);
if (anchor) {
p++; lp--; /* skip anchor character */
}
ms.L = L;
ms.matchdepth = MAXCCALLS;
ms.src_init = src;
ms.src_end = src+srcl;
ms.p_end = p + lp;
while (n < max_s) {
const char *e;
ms.level = 0;
lua_assert(ms.matchdepth == MAXCCALLS);
e = match(&ms, src, p);
if (e) {
n++;
add_value(&ms, &b, src, e, tr);
}
if (e && e>src) /* non empty match? */
src = e; /* skip it */
else if (src < ms.src_end)
luaL_addchar(&b, *src++);
else break;
if (anchor) break;
}
luaL_addlstring(&b, src, ms.src_end-src);
luaL_pushresult(&b);
lua_pushinteger(L, n); /* number of substitutions */
return 2;
}
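/*
** Illustration (not part of the original source): the empty-match guard in
** the loop above is what keeps gsub("abc", "x*", "-") from looping forever.
** At every position the pattern matches the empty string, so e == src; the
** replacement is emitted, then one original character is copied and src is
** advanced by hand, yielding "-a-b-c-" with a substitution count of 4.
*/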
/* }====================================================== */
/*
** {======================================================
** STRING FORMAT
** =======================================================
*/
/*
** LUA_INTFRMLEN is the length modifier for integer conversions in
** 'string.format'; LUA_INTFRM_T is the integer type corresponding to
** the previous length
*/
#if !defined(LUA_INTFRMLEN) /* { */
#if defined(LUA_USE_LONGLONG)
#define LUA_INTFRMLEN "ll"
#define LUA_INTFRM_T long long
#else
#define LUA_INTFRMLEN "l"
#define LUA_INTFRM_T long
#endif
#endif /* } */
/*
** LUA_FLTFRMLEN is the length modifier for float conversions in
** 'string.format'; LUA_FLTFRM_T is the float type corresponding to
** the previous length
*/
#if !defined(LUA_FLTFRMLEN)
#define LUA_FLTFRMLEN ""
#define LUA_FLTFRM_T double
#endif
/* maximum size of each formatted item (> len(format('%99.99f', -1e308))) */
#define MAX_ITEM 512
/* valid flags in a format specification */
#define FLAGS "-+ #0"
/*
** maximum size of each format specification (such as '%-099.99d')
** (+10 accounts for %99.99x plus margin of error)
*/
#define MAX_FORMAT (sizeof(FLAGS) + sizeof(LUA_INTFRMLEN) + 10)
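/*
** Size check (editorial illustration, not in the original source): with
** FLAGS = "-+ #0", sizeof(FLAGS) is 6 (five flag characters plus the NUL)
** and sizeof(LUA_INTFRMLEN) is at most 3 ("ll" plus NUL), so MAX_FORMAT is
** at most 6 + 3 + 10 = 19.  A maximal specification ('%', all five flags, a
** two-digit width, '.', a two-digit precision, the "ll" modifier and the
** conversion character) is 14 characters plus the NUL, so it fits with room
** to spare.
*/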
static void addquoted (lua_State *L, luaL_Buffer *b, int arg) {
size_t l;
const char *s = luaL_checklstring(L, arg, &l);
luaL_addchar(b, '"');
while (l--) {
if (*s == '"' || *s == '\\' || *s == '\n') {
luaL_addchar(b, '\\');
luaL_addchar(b, *s);
}
else if (*s == '\0' || iscntrl(uchar(*s))) {
char buff[10];
if (!isdigit(uchar(*(s+1))))
snprintf(buff, sizeof(buff), "\\%d", (int)uchar(*s));
else
snprintf(buff, sizeof(buff), "\\%03d", (int)uchar(*s));
luaL_addstring(b, buff);
}
else
luaL_addchar(b, *s);
s++;
}
luaL_addchar(b, '"');
}
static const char *scanformat (lua_State *L, const char *strfrmt, char *form) {
const char *p = strfrmt;
while (*p != '\0' && strchr(FLAGS, *p) != NULL) p++; /* skip flags */
if ((size_t)(p - strfrmt) >= sizeof(FLAGS)/sizeof(char))
luaL_error(L, "invalid format (repeated flags)");
if (isdigit(uchar(*p))) p++; /* skip width */
if (isdigit(uchar(*p))) p++; /* (2 digits at most) */
if (*p == '.') {
p++;
if (isdigit(uchar(*p))) p++; /* skip precision */
if (isdigit(uchar(*p))) p++; /* (2 digits at most) */
}
if (isdigit(uchar(*p)))
luaL_error(L, "invalid format (width or precision too long)");
*(form++) = '%';
memcpy(form, strfrmt, (p - strfrmt + 1) * sizeof(char));
form += p - strfrmt + 1;
*form = '\0';
return p;
}
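/*
** Worked example (editorial illustration, not in the original source): for
** the format item "%-08.3d", scanformat() is entered with strfrmt pointing
** at the '-' (str_format() has already consumed the '%').  It skips the
** flags "-0", the width "8", the '.' and the precision "3", copies that text
** plus the conversion character into form, leaving form = "%-08.3d", and
** returns a pointer to the 'd'.  For integer items str_format() then calls
** addlenmod(), which turns form into "%-08.3lld" (when LUA_INTFRMLEN is
** "ll") before formatting.
*/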
/*
** add length modifier into formats
*/
static void addlenmod (char *form, const char *lenmod, size_t size) {
size_t l = strlen(form);
size_t lm = strlen(lenmod);
char spec = form[l - 1];
strlcpy(form + l - 1, lenmod, size - (l - 1));
form[l + lm - 1] = spec;
form[l + lm] = '\0';
}
static int str_format (lua_State *L) {
int top = lua_gettop(L);
int arg = 1;
size_t sfl;
const char *strfrmt = luaL_checklstring(L, arg, &sfl);
const char *strfrmt_end = strfrmt+sfl;
luaL_Buffer b;
luaL_buffinit(L, &b);
while (strfrmt < strfrmt_end) {
if (*strfrmt != L_ESC)
luaL_addchar(&b, *strfrmt++);
else if (*++strfrmt == L_ESC)
luaL_addchar(&b, *strfrmt++); /* %% */
else { /* format item */
char form[MAX_FORMAT]; /* to store the format (`%...') */
char *buff = luaL_prepbuffsize(&b, MAX_ITEM); /* to put formatted item */
int nb = 0; /* number of bytes in added item */
if (++arg > top)
luaL_argerror(L, arg, "no value");
strfrmt = scanformat(L, strfrmt, form);
switch (*strfrmt++) {
case 'c': {
nb = str_sprintf(buff, form, luaL_checkint(L, arg));
break;
}
case 'd': case 'i': {
lua_Number n = luaL_checknumber(L, arg);
LUA_INTFRM_T ni = (LUA_INTFRM_T)n;
lua_Number diff = n - (lua_Number)ni;
luaL_argcheck(L, -1 < diff && diff < 1, arg,
"not a number in proper range");
addlenmod(form, LUA_INTFRMLEN, MAX_FORMAT);
nb = str_sprintf(buff, form, ni);
break;
}
case 'o': case 'u': case 'x': case 'X': {
lua_Number n = luaL_checknumber(L, arg);
unsigned LUA_INTFRM_T ni = (unsigned LUA_INTFRM_T)n;
lua_Number diff = n - (lua_Number)ni;
luaL_argcheck(L, -1 < diff && diff < 1, arg,
"not a non-negative number in proper range");
addlenmod(form, LUA_INTFRMLEN, MAX_FORMAT);
nb = str_sprintf(buff, form, ni);
break;
}
#if defined(LUA_USE_FLOAT_FORMATS)
case 'e': case 'E': case 'f':
#if defined(LUA_USE_AFORMAT)
case 'a': case 'A':
#endif
case 'g': case 'G': {
addlenmod(form, LUA_FLTFRMLEN, MAX_FORMAT);
nb = str_sprintf(buff, form, (LUA_FLTFRM_T)luaL_checknumber(L, arg));
break;
}
#endif
case 'q': {
addquoted(L, &b, arg);
break;
}
case 's': {
size_t l;
const char *s = luaL_tolstring(L, arg, &l);
if (!strchr(form, '.') && l >= 100) {
/* no precision and string is too long to be formatted;
keep original string */
luaL_addvalue(&b);
break;
}
else {
nb = str_sprintf(buff, form, s);
lua_pop(L, 1); /* remove result from 'luaL_tolstring' */
break;
}
}
default: { /* also treat cases `pnLlh' */
return luaL_error(L, "invalid option " LUA_QL("%%%c") " to "
LUA_QL("format"), *(strfrmt - 1));
}
}
luaL_addsize(&b, nb);
}
}
luaL_pushresult(&b);
return 1;
}
/* }====================================================== */
static const luaL_Reg strlib[] = {
{"byte", str_byte},
{"char", str_char},
#if defined(LUA_USE_DUMP)
{"dump", str_dump},
#endif
{"find", str_find},
{"format", str_format},
{"gmatch", str_gmatch},
{"gsub", str_gsub},
{"len", str_len},
{"lower", str_lower},
{"match", str_match},
{"rep", str_rep},
{"reverse", str_reverse},
{"sub", str_sub},
{"upper", str_upper},
{NULL, NULL}
};
static void createmetatable (lua_State *L) {
lua_createtable(L, 0, 1); /* table to be metatable for strings */
lua_pushliteral(L, ""); /* dummy string */
lua_pushvalue(L, -2); /* copy table */
lua_setmetatable(L, -2); /* set table as metatable for strings */
lua_pop(L, 1); /* pop dummy string */
lua_pushvalue(L, -2); /* get string library */
lua_setfield(L, -2, "__index"); /* metatable.__index = string */
lua_pop(L, 1); /* pop metatable */
}
/*
** Open string library
*/
LUAMOD_API int luaopen_string (lua_State *L) {
luaL_newlib(L, strlib);
createmetatable(L);
return 1;
}
#if defined(_KERNEL)
EXPORT_SYMBOL(luaopen_string);
#endif
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/lua/ltable.c b/sys/contrib/openzfs/module/lua/ltable.c
index 0ba462cfd885..f6872babc6e7 100644
--- a/sys/contrib/openzfs/module/lua/ltable.c
+++ b/sys/contrib/openzfs/module/lua/ltable.c
@@ -1,592 +1,592 @@
/* BEGIN CSTYLED */
/*
** $Id: ltable.c,v 2.72.1.1 2013/04/12 18:48:47 roberto Exp $
** Lua tables (hash)
** See Copyright Notice in lua.h
*/
/*
** Implementation of tables (aka arrays, objects, or hash tables).
** Tables keep their elements in two parts: an array part and a hash part.
** Non-negative integer keys are all candidates to be kept in the array
** part. The actual size of the array is the largest `n' such that at
** least half the slots between 0 and n are in use.
** Hash uses a mix of chained scatter table with Brent's variation.
** A main invariant of these tables is that, if an element is not
** in its main position (i.e. the `original' position that its hash gives
** to it), then the colliding element is in its own main position.
** Hence even when the load factor reaches 100%, performance remains good.
*/
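/*
** Concrete example (editorial note, not part of the upstream comment): a
** table holding only the integer keys {1, 2, 3, 5} gets an array part of
** size 4, because more than half of the slots 1..4 are in use (3 of 4),
** while growing to 8 would leave no more than half of the slots used; key 5
** therefore lives in the hash part.  The main-position invariant means a
** colliding key is always reachable by walking the chain that starts at its
** own hash slot, never by probing unrelated slots.
*/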
#define ltable_c
#define LUA_CORE
#include <sys/lua/lua.h>
#include "ldebug.h"
#include "ldo.h"
#include "lgc.h"
#include "lmem.h"
#include "lobject.h"
#include "lstate.h"
#include "lstring.h"
#include "ltable.h"
#include "lvm.h"
/*
** max size of array part is 2^MAXBITS
*/
#if LUAI_BITSINT >= 32
#define MAXBITS 30
#else
#define MAXBITS (LUAI_BITSINT-2)
#endif
#define MAXASIZE (1 << MAXBITS)
#define hashpow2(t,n) (gnode(t, lmod((n), sizenode(t))))
#define hashstr(t,str) hashpow2(t, (str)->tsv.hash)
#define hashboolean(t,p) hashpow2(t, p)
/*
** for some types, it is better to avoid modulus by power of 2, as
** they tend to have many 2 factors.
*/
#define hashmod(t,n) (gnode(t, ((n) % ((sizenode(t)-1)|1))))
#define hashpointer(t,p) hashmod(t, IntPoint(p))
#define dummynode (&dummynode_)
#define isdummy(n) ((n) == dummynode)
static const Node dummynode_ = {
{NILCONSTANT}, /* value */
{{NILCONSTANT, NULL}} /* key */
};
/*
** hash for lua_Numbers
*/
static Node *hashnum (const Table *t, lua_Number n) {
int i;
luai_hashnum(i, n);
if (i < 0) {
if (cast(unsigned int, i) == 0u - i) /* use unsigned to avoid overflows */
i = 0; /* handle INT_MIN */
i = -i; /* must be a positive value */
}
return hashmod(t, i);
}
/*
** returns the `main' position of an element in a table (that is, the index
** of its hash value)
*/
static Node *mainposition (const Table *t, const TValue *key) {
switch (ttype(key)) {
case LUA_TNUMBER:
return hashnum(t, nvalue(key));
case LUA_TLNGSTR: {
TString *s = rawtsvalue(key);
if (s->tsv.extra == 0) { /* no hash? */
s->tsv.hash = luaS_hash(getstr(s), s->tsv.len, s->tsv.hash);
s->tsv.extra = 1; /* now it has its hash */
}
return hashstr(t, rawtsvalue(key));
}
case LUA_TSHRSTR:
return hashstr(t, rawtsvalue(key));
case LUA_TBOOLEAN:
return hashboolean(t, bvalue(key));
case LUA_TLIGHTUSERDATA:
return hashpointer(t, pvalue(key));
case LUA_TLCF:
return hashpointer(t, fvalue(key));
default:
return hashpointer(t, gcvalue(key));
}
}
/*
** returns the index for `key' if `key' is an appropriate key to live in
** the array part of the table, -1 otherwise.
*/
static int arrayindex (const TValue *key) {
if (ttisnumber(key)) {
lua_Number n = nvalue(key);
int k;
lua_number2int(k, n);
if (luai_numeq(cast_num(k), n))
return k;
}
return -1; /* `key' did not match some condition */
}
/*
** returns the index of a `key' for table traversals. First goes all
** elements in the array part, then elements in the hash part. The
** beginning of a traversal is signaled by -1.
*/
static int findindex (lua_State *L, Table *t, StkId key) {
int i;
if (ttisnil(key)) return -1; /* first iteration */
i = arrayindex(key);
if (0 < i && i <= t->sizearray) /* is `key' inside array part? */
return i-1; /* yes; that's the index (corrected to C) */
else {
Node *n = mainposition(t, key);
for (;;) { /* check whether `key' is somewhere in the chain */
/* key may be dead already, but it is ok to use it in `next' */
if (luaV_rawequalobj(gkey(n), key) ||
(ttisdeadkey(gkey(n)) && iscollectable(key) &&
deadvalue(gkey(n)) == gcvalue(key))) {
i = cast_int(n - gnode(t, 0)); /* key index in hash table */
/* hash elements are numbered after array ones */
return i + t->sizearray;
}
else n = gnext(n);
if (n == NULL)
luaG_runerror(L, "invalid key to " LUA_QL("next")); /* key not found */
}
}
}
int luaH_next (lua_State *L, Table *t, StkId key) {
int i = findindex(L, t, key); /* find original element */
for (i++; i < t->sizearray; i++) { /* try first array part */
if (!ttisnil(&t->array[i])) { /* a non-nil value? */
setnvalue(key, cast_num(i+1));
setobj2s(L, key+1, &t->array[i]);
return 1;
}
}
for (i -= t->sizearray; i < sizenode(t); i++) { /* then hash part */
if (!ttisnil(gval(gnode(t, i)))) { /* a non-nil value? */
setobj2s(L, key, gkey(gnode(t, i)));
setobj2s(L, key+1, gval(gnode(t, i)));
return 1;
}
}
return 0; /* no more elements */
}
/*
** {=============================================================
** Rehash
** ==============================================================
*/
static int computesizes (int nums[], int *narray) {
int i;
int twotoi; /* 2^i */
int a = 0; /* number of elements smaller than 2^i */
int na = 0; /* number of elements to go to array part */
int n = 0; /* optimal size for array part */
for (i = 0, twotoi = 1; twotoi/2 < *narray; i++, twotoi *= 2) {
if (nums[i] > 0) {
a += nums[i];
if (a > twotoi/2) { /* more than half elements present? */
n = twotoi; /* optimal size (till now) */
na = a; /* all elements smaller than n will go to array part */
}
}
if (a == *narray) break; /* all elements already counted */
}
*narray = n;
lua_assert(*narray/2 <= na && na <= *narray);
return na;
}
static int countint (const TValue *key, int *nums) {
int k = arrayindex(key);
if (0 < k && k <= MAXASIZE) { /* is `key' an appropriate array index? */
nums[luaO_ceillog2(k)]++; /* count as such */
return 1;
}
else
return 0;
}
static int numusearray (const Table *t, int *nums) {
int lg;
int ttlg; /* 2^lg */
int ause = 0; /* summation of `nums' */
int i = 1; /* count to traverse all array keys */
for (lg=0, ttlg=1; lg<=MAXBITS; lg++, ttlg*=2) { /* for each slice */
int lc = 0; /* counter */
int lim = ttlg;
if (lim > t->sizearray) {
lim = t->sizearray; /* adjust upper limit */
if (i > lim)
break; /* no more elements to count */
}
/* count elements in range (2^(lg-1), 2^lg] */
for (; i <= lim; i++) {
if (!ttisnil(&t->array[i-1]))
lc++;
}
nums[lg] += lc;
ause += lc;
}
return ause;
}
static int numusehash (const Table *t, int *nums, int *pnasize) {
int totaluse = 0; /* total number of elements */
int ause = 0; /* summation of `nums' */
int i = sizenode(t);
while (i--) {
Node *n = &t->node[i];
if (!ttisnil(gval(n))) {
ause += countint(gkey(n), nums);
totaluse++;
}
}
*pnasize += ause;
return totaluse;
}
static void setarrayvector (lua_State *L, Table *t, int size) {
int i;
luaM_reallocvector(L, t->array, t->sizearray, size, TValue);
for (i=t->sizearray; i<size; i++)
setnilvalue(&t->array[i]);
t->sizearray = size;
}
static void setnodevector (lua_State *L, Table *t, int size) {
int lsize;
if (size == 0) { /* no elements to hash part? */
t->node = cast(Node *, dummynode); /* use common `dummynode' */
lsize = 0;
}
else {
int i;
lsize = luaO_ceillog2(size);
if (lsize > MAXBITS)
luaG_runerror(L, "table overflow");
size = twoto(lsize);
t->node = luaM_newvector(L, size, Node);
for (i=0; i<size; i++) {
Node *n = gnode(t, i);
gnext(n) = NULL;
setnilvalue(gkey(n));
setnilvalue(gval(n));
}
}
t->lsizenode = cast_byte(lsize);
t->lastfree = gnode(t, size); /* all positions are free */
}
void luaH_resize (lua_State *L, Table *t, int nasize, int nhsize) {
int i;
int oldasize = t->sizearray;
int oldhsize = t->lsizenode;
Node *nold = t->node; /* save old hash ... */
if (nasize > oldasize) /* array part must grow? */
setarrayvector(L, t, nasize);
/* create new hash part with appropriate size */
setnodevector(L, t, nhsize);
if (nasize < oldasize) { /* array part must shrink? */
t->sizearray = nasize;
/* re-insert elements from vanishing slice */
for (i=nasize; i<oldasize; i++) {
if (!ttisnil(&t->array[i]))
luaH_setint(L, t, i + 1, &t->array[i]);
}
/* shrink array */
luaM_reallocvector(L, t->array, oldasize, nasize, TValue);
}
/* re-insert elements from hash part */
for (i = twoto(oldhsize) - 1; i >= 0; i--) {
Node *old = nold+i;
if (!ttisnil(gval(old))) {
/* doesn't need barrier/invalidate cache, as entry was
already present in the table */
setobjt2t(L, luaH_set(L, t, gkey(old)), gval(old));
}
}
if (!isdummy(nold))
luaM_freearray(L, nold, cast(size_t, twoto(oldhsize))); /* free old array */
}
void luaH_resizearray (lua_State *L, Table *t, int nasize) {
int nsize = isdummy(t->node) ? 0 : sizenode(t);
luaH_resize(L, t, nasize, nsize);
}
static void rehash (lua_State *L, Table *t, const TValue *ek) {
int nasize, na;
int nums[MAXBITS+1]; /* nums[i] = number of keys with 2^(i-1) < k <= 2^i */
int i;
int totaluse;
for (i=0; i<=MAXBITS; i++) nums[i] = 0; /* reset counts */
nasize = numusearray(t, nums); /* count keys in array part */
totaluse = nasize; /* all those keys are integer keys */
totaluse += numusehash(t, nums, &nasize); /* count keys in hash part */
/* count extra key */
nasize += countint(ek, nums);
totaluse++;
/* compute new size for array part */
na = computesizes(nums, &nasize);
/* resize the table to new computed sizes */
luaH_resize(L, t, nasize, totaluse - na);
}
/*
** }=============================================================
*/
Table *luaH_new (lua_State *L) {
Table *t = &luaC_newobj(L, LUA_TTABLE, sizeof(Table), NULL, 0)->h;
t->metatable = NULL;
t->flags = cast_byte(~0);
t->array = NULL;
t->sizearray = 0;
setnodevector(L, t, 0);
return t;
}
void luaH_free (lua_State *L, Table *t) {
if (!isdummy(t->node))
luaM_freearray(L, t->node, cast(size_t, sizenode(t)));
luaM_freearray(L, t->array, t->sizearray);
luaM_free(L, t);
}
static Node *getfreepos (Table *t) {
while (t->lastfree > t->node) {
t->lastfree--;
if (ttisnil(gkey(t->lastfree)))
return t->lastfree;
}
return NULL; /* could not find a free place */
}
/*
** inserts a new key into a hash table; first, check whether key's main
** position is free. If not, check whether colliding node is in its main
** position or not: if it is not, move colliding node to an empty place and
** put new key in its main position; otherwise (colliding node is in its main
** position), new key goes to an empty position.
*/
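/*
** Illustrative scenario (editorial note, not part of the upstream comment):
** suppose key A already sits in its main position, slot 3, and a new key B
** also hashes to slot 3.  B is placed in a free slot taken from 'lastfree'
** and chained after A.  If instead slot 3 held a key C whose main position
** is really slot 7 (C itself collided earlier), C is moved out to the free
** slot, the chain starting at slot 7 is re-linked to C's new location, and
** B takes over slot 3 as its main position.
*/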
TValue *luaH_newkey (lua_State *L, Table *t, const TValue *key) {
Node *mp;
if (ttisnil(key)) luaG_runerror(L, "table index is nil");
#if defined LUA_HAS_FLOAT_NUMBERS
else if (ttisnumber(key) && luai_numisnan(L, nvalue(key)))
luaG_runerror(L, "table index is NaN");
#endif
mp = mainposition(t, key);
if (!ttisnil(gval(mp)) || isdummy(mp)) { /* main position is taken? */
Node *othern;
Node *n = getfreepos(t); /* get a free place */
if (n == NULL) { /* cannot find a free place? */
rehash(L, t, key); /* grow table */
/* whatever called 'newkey' takes care of TM cache and GC barrier */
return luaH_set(L, t, key); /* insert key into grown table */
}
lua_assert(!isdummy(n));
othern = mainposition(t, gkey(mp));
if (othern != mp) { /* is colliding node out of its main position? */
/* yes; move colliding node into free position */
while (gnext(othern) != mp) othern = gnext(othern); /* find previous */
gnext(othern) = n; /* redo the chain with `n' in place of `mp' */
*n = *mp; /* copy colliding node into free pos. (mp->next also goes) */
gnext(mp) = NULL; /* now `mp' is free */
setnilvalue(gval(mp));
}
else { /* colliding node is in its own main position */
/* new node will go into free position */
gnext(n) = gnext(mp); /* chain new position */
gnext(mp) = n;
mp = n;
}
}
setobj2t(L, gkey(mp), key);
luaC_barrierback(L, obj2gco(t), key);
lua_assert(ttisnil(gval(mp)));
return gval(mp);
}
/*
** search function for integers
*/
const TValue *luaH_getint (Table *t, int key) {
/* (1 <= key && key <= t->sizearray) */
if (cast(unsigned int, key-1) < cast(unsigned int, t->sizearray))
return &t->array[key-1];
else {
lua_Number nk = cast_num(key);
Node *n = hashnum(t, nk);
do { /* check whether `key' is somewhere in the chain */
if (ttisnumber(gkey(n)) && luai_numeq(nvalue(gkey(n)), nk))
return gval(n); /* that's it */
else n = gnext(n);
} while (n);
return luaO_nilobject;
}
}
/*
** search function for short strings
*/
const TValue *luaH_getstr (Table *t, TString *key) {
Node *n = hashstr(t, key);
lua_assert(key->tsv.tt == LUA_TSHRSTR);
do { /* check whether `key' is somewhere in the chain */
if (ttisshrstring(gkey(n)) && eqshrstr(rawtsvalue(gkey(n)), key))
return gval(n); /* that's it */
else n = gnext(n);
} while (n);
return luaO_nilobject;
}
/*
** main search function
*/
const TValue *luaH_get (Table *t, const TValue *key) {
switch (ttype(key)) {
case LUA_TSHRSTR: return luaH_getstr(t, rawtsvalue(key));
case LUA_TNIL: return luaO_nilobject;
case LUA_TNUMBER: {
int k;
lua_Number n = nvalue(key);
lua_number2int(k, n);
if (luai_numeq(cast_num(k), n)) /* index is int? */
return luaH_getint(t, k); /* use specialized version */
/* else go through */
}
- /* FALLTHROUGH */
+ fallthrough;
default: {
Node *n = mainposition(t, key);
do { /* check whether `key' is somewhere in the chain */
if (luaV_rawequalobj(gkey(n), key))
return gval(n); /* that's it */
else n = gnext(n);
} while (n);
return luaO_nilobject;
}
}
}
/*
** beware: when using this function you probably need to check a GC
** barrier and invalidate the TM cache.
*/
TValue *luaH_set (lua_State *L, Table *t, const TValue *key) {
const TValue *p = luaH_get(t, key);
if (p != luaO_nilobject)
return cast(TValue *, p);
else return luaH_newkey(L, t, key);
}
void luaH_setint (lua_State *L, Table *t, int key, TValue *value) {
const TValue *p = luaH_getint(t, key);
TValue *cell;
if (p != luaO_nilobject)
cell = cast(TValue *, p);
else {
TValue k;
setnvalue(&k, cast_num(key));
cell = luaH_newkey(L, t, &k);
}
setobj2t(L, cell, value);
}
static int unbound_search (Table *t, unsigned int j) {
unsigned int i = j; /* i is zero or a present index */
j++;
/* find `i' and `j' such that i is present and j is not */
while (!ttisnil(luaH_getint(t, j))) {
i = j;
j *= 2;
if (j > cast(unsigned int, MAX_INT)) { /* overflow? */
/* table was built with bad purposes: resort to linear search */
i = 1;
while (!ttisnil(luaH_getint(t, i))) i++;
return i - 1;
}
}
/* now do a binary search between them */
while (j - i > 1) {
unsigned int m = (i+j)/2;
if (ttisnil(luaH_getint(t, m))) j = m;
else i = m;
}
return i;
}
/*
** Try to find a boundary in table `t'. A `boundary' is an integer index
** such that t[i] is non-nil and t[i+1] is nil (and 0 if t[1] is nil).
*/
int luaH_getn (Table *t) {
unsigned int j = t->sizearray;
if (j > 0 && ttisnil(&t->array[j - 1])) {
/* there is a boundary in the array part: (binary) search for it */
unsigned int i = 0;
while (j - i > 1) {
unsigned int m = (i+j)/2;
if (ttisnil(&t->array[m - 1])) j = m;
else i = m;
}
return i;
}
/* else must find a boundary in hash part */
else if (isdummy(t->node)) /* hash part is empty? */
return j; /* that is easy... */
else return unbound_search(t, j);
}
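/*
** Example of the boundary rule above (editorial illustration): for a table
** built as {1, 2, 3, nil, 5}, both i = 3 and i = 5 satisfy "t[i] is non-nil
** and t[i+1] is nil", so luaH_getn() may legitimately return either value,
** depending on how the keys ended up split between the array and hash parts.
*/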
#if defined(LUA_DEBUG)
Node *luaH_mainposition (const Table *t, const TValue *key) {
return mainposition(t, key);
}
int luaH_isdummy (Node *n) { return isdummy(n); }
#endif
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c b/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c
index 60ea627e975b..3f4feb140d5e 100644
--- a/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c
+++ b/sys/contrib/openzfs/module/os/freebsd/spl/spl_vfs.c
@@ -1,287 +1,291 @@
/*
* Copyright (c) 2006-2007 Pawel Jakub Dawidek <pjd@FreeBSD.org>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
* SUCH DAMAGE.
*/
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/cred.h>
#include <sys/vfs.h>
#include <sys/priv.h>
#include <sys/libkern.h>
#include <sys/mutex.h>
#include <sys/vnode.h>
#include <sys/taskq.h>
#include <sys/ccompat.h>
MALLOC_DECLARE(M_MOUNT);
void
vfs_setmntopt(vfs_t *vfsp, const char *name, const char *arg,
int flags __unused)
{
struct vfsopt *opt;
size_t namesize;
int locked;
if (!(locked = mtx_owned(MNT_MTX(vfsp))))
MNT_ILOCK(vfsp);
if (vfsp->mnt_opt == NULL) {
void *opts;
MNT_IUNLOCK(vfsp);
opts = malloc(sizeof (*vfsp->mnt_opt), M_MOUNT, M_WAITOK);
MNT_ILOCK(vfsp);
if (vfsp->mnt_opt == NULL) {
vfsp->mnt_opt = opts;
TAILQ_INIT(vfsp->mnt_opt);
} else {
free(opts, M_MOUNT);
}
}
MNT_IUNLOCK(vfsp);
opt = malloc(sizeof (*opt), M_MOUNT, M_WAITOK);
namesize = strlen(name) + 1;
opt->name = malloc(namesize, M_MOUNT, M_WAITOK);
strlcpy(opt->name, name, namesize);
opt->pos = -1;
opt->seen = 1;
if (arg == NULL) {
opt->value = NULL;
opt->len = 0;
} else {
opt->len = strlen(arg) + 1;
opt->value = malloc(opt->len, M_MOUNT, M_WAITOK);
bcopy(arg, opt->value, opt->len);
}
MNT_ILOCK(vfsp);
TAILQ_INSERT_TAIL(vfsp->mnt_opt, opt, link);
if (!locked)
MNT_IUNLOCK(vfsp);
}
void
vfs_clearmntopt(vfs_t *vfsp, const char *name)
{
int locked;
if (!(locked = mtx_owned(MNT_MTX(vfsp))))
MNT_ILOCK(vfsp);
vfs_deleteopt(vfsp->mnt_opt, name);
if (!locked)
MNT_IUNLOCK(vfsp);
}
int
vfs_optionisset(const vfs_t *vfsp, const char *opt, char **argp)
{
struct vfsoptlist *opts = vfsp->mnt_optnew;
int error;
if (opts == NULL)
return (0);
error = vfs_getopt(opts, opt, (void **)argp, NULL);
return (error != 0 ? 0 : 1);
}
int
mount_snapshot(kthread_t *td, vnode_t **vpp, const char *fstype, char *fspath,
char *fspec, int fsflags)
{
struct vfsconf *vfsp;
struct mount *mp;
vnode_t *vp, *mvp;
- struct ucred *cr;
+ struct ucred *pcr, *tcr;
int error;
ASSERT_VOP_ELOCKED(*vpp, "mount_snapshot");
vp = *vpp;
*vpp = NULL;
error = 0;
/*
* Be ultra-paranoid about making sure the type and fspath
* variables will fit in our mp buffers, including the
* terminating NUL.
*/
if (strlen(fstype) >= MFSNAMELEN || strlen(fspath) >= MNAMELEN)
error = ENAMETOOLONG;
if (error == 0 && (vfsp = vfs_byname_kld(fstype, td, &error)) == NULL)
error = ENODEV;
if (error == 0 && vp->v_type != VDIR)
error = ENOTDIR;
/*
* We need vnode lock to protect v_mountedhere and vnode interlock
* to protect v_iflag.
*/
if (error == 0) {
VI_LOCK(vp);
if ((vp->v_iflag & VI_MOUNT) == 0 && vp->v_mountedhere == NULL)
vp->v_iflag |= VI_MOUNT;
else
error = EBUSY;
VI_UNLOCK(vp);
}
if (error != 0) {
vput(vp);
return (error);
}
vn_seqc_write_begin(vp);
VOP_UNLOCK1(vp);
/*
* Allocate and initialize the filesystem.
* We don't want the regular user that triggered the snapshot mount to be able
* to unmount it, so pass credentials of the parent mount.
*/
mp = vfs_mount_alloc(vp, vfsp, fspath, vp->v_mount->mnt_cred);
mp->mnt_optnew = NULL;
vfs_setmntopt(mp, "from", fspec, 0);
mp->mnt_optnew = mp->mnt_opt;
mp->mnt_opt = NULL;
/*
* Set the mount level flags.
*/
mp->mnt_flag = fsflags & MNT_UPDATEMASK;
/*
* Snapshots are always read-only.
*/
mp->mnt_flag |= MNT_RDONLY;
/*
* We don't want snapshots to allow access to vulnerable setuid
* programs, so we turn off setuid when mounting snapshots.
*/
mp->mnt_flag |= MNT_NOSUID;
/*
* We don't want snapshots to be visible in regular
* mount(8) and df(1) output.
*/
mp->mnt_flag |= MNT_IGNORE;
+
/*
* XXX: This is evil, but we can't mount a snapshot as a regular user.
* XXX: Is it safe when the snapshot is mounted from within a jail?
*/
- cr = td->td_ucred;
+ tcr = td->td_ucred;
+ pcr = td->td_proc->p_ucred;
td->td_ucred = kcred;
+ td->td_proc->p_ucred = kcred;
error = VFS_MOUNT(mp);
- td->td_ucred = cr;
+ td->td_ucred = tcr;
+ td->td_proc->p_ucred = pcr;
if (error != 0) {
/*
* Clear VI_MOUNT and decrement the use count "atomically",
* under the vnode lock. This is not strictly required,
* but makes it easier to reason about the life-cycle and
* ownership of the covered vnode.
*/
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
VI_UNLOCK(vp);
vn_seqc_write_end(vp);
vput(vp);
vfs_unbusy(mp);
vfs_freeopts(mp->mnt_optnew);
mp->mnt_vnodecovered = NULL;
vfs_mount_destroy(mp);
return (error);
}
if (mp->mnt_opt != NULL)
vfs_freeopts(mp->mnt_opt);
mp->mnt_opt = mp->mnt_optnew;
(void) VFS_STATFS(mp, &mp->mnt_stat);
/*
* Prevent external consumers of mount options from reading
* mnt_optnew.
*/
mp->mnt_optnew = NULL;
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
#ifdef FREEBSD_NAMECACHE
cache_purge(vp);
#endif
VI_LOCK(vp);
vp->v_iflag &= ~VI_MOUNT;
#ifdef VIRF_MOUNTPOINT
vn_irflag_set_locked(vp, VIRF_MOUNTPOINT);
#endif
vp->v_mountedhere = mp;
VI_UNLOCK(vp);
/* Put the new filesystem on the mount list. */
mtx_lock(&mountlist_mtx);
TAILQ_INSERT_TAIL(&mountlist, mp, mnt_list);
mtx_unlock(&mountlist_mtx);
vfs_event_signal(NULL, VQ_MOUNT, 0);
if (VFS_ROOT(mp, LK_EXCLUSIVE, &mvp))
panic("mount: lost mount");
vn_seqc_write_end(vp);
VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300048
vfs_op_exit(mp);
#endif
vfs_unbusy(mp);
*vpp = mvp;
return (0);
}
/*
* Like vn_rele() except if we are going to call VOP_INACTIVE() then do it
* asynchronously using a taskq. This can avoid deadlocks caused by re-entering
* the file system as a result of releasing the vnode. Note, file systems
* already have to handle the race where the vnode is incremented before the
* inactive routine is called and does its locking.
*
* Warning: Excessive use of this routine can lead to performance problems.
* This is because taskqs throttle back allocation if too many are created.
*/
void
vn_rele_async(vnode_t *vp, taskq_t *taskq)
{
VERIFY3U(vp->v_usecount, >, 0);
if (refcount_release_if_not_last(&vp->v_usecount)) {
#if __FreeBSD_version < 1300045
vdrop(vp);
#endif
return;
}
VERIFY3U(taskq_dispatch((taskq_t *)taskq,
(task_func_t *)vrele, vp, TQ_SLEEP), !=, 0);
}
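/*
* Hypothetical usage sketch (illustration, not part of this change): a caller
* that must not re-enter the file system while dropping a vnode can defer the
* final vrele() to taskq context, for example
*
*	vn_rele_async(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
*
* where the particular taskq is only an assumption for the example; the
* function itself accepts any valid taskq_t.
*/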
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
index 9d42755b963b..ae758bcefe21 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_acl.c
@@ -1,2672 +1,2672 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <acl/acl_common.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE ALLOW
#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)
#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)
#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)
#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)
#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\
ZFS_ACL_PROTECTED)
#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\
ZFS_ACL_OBJ_ACE)
#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
return (((zfs_oldace_t *)acep)->z_type);
}
static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
return (((zfs_oldace_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
return (((zfs_oldace_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_v0_get_who(void *acep)
{
return (((zfs_oldace_t *)acep)->z_fuid);
}
static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
((zfs_oldace_t *)acep)->z_type = type;
}
static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
((zfs_oldace_t *)acep)->z_flags = flags;
}
static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
((zfs_oldace_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
((zfs_oldace_t *)acep)->z_fuid = who;
}
/*ARGSUSED*/
static size_t
zfs_ace_v0_size(void *acep)
{
return (sizeof (zfs_oldace_t));
}
static size_t
zfs_ace_v0_abstract_size(void)
{
return (sizeof (zfs_oldace_t));
}
static int
zfs_ace_v0_mask_off(void)
{
return (offsetof(zfs_oldace_t, z_access_mask));
}
/*ARGSUSED*/
static int
zfs_ace_v0_data(void *acep, void **datap)
{
*datap = NULL;
return (0);
}
static acl_ops_t zfs_acl_v0_ops = {
zfs_ace_v0_get_mask,
zfs_ace_v0_set_mask,
zfs_ace_v0_get_flags,
zfs_ace_v0_set_flags,
zfs_ace_v0_get_type,
zfs_ace_v0_set_type,
zfs_ace_v0_get_who,
zfs_ace_v0_set_who,
zfs_ace_v0_size,
zfs_ace_v0_abstract_size,
zfs_ace_v0_mask_off,
zfs_ace_v0_data
};
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_type);
}
static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_fuid_get_who(void *args)
{
uint16_t entry_type;
zfs_ace_t *acep = args;
entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (-1);
return (((zfs_ace_t *)acep)->z_fuid);
}
static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
((zfs_ace_hdr_t *)acep)->z_type = type;
}
static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
((zfs_ace_hdr_t *)acep)->z_flags = flags;
}
static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
zfs_ace_t *acep = arg;
uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return;
acep->z_fuid = who;
}
static size_t
zfs_ace_fuid_size(void *acep)
{
zfs_ace_hdr_t *zacep = acep;
uint16_t entry_type;
switch (zacep->z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
return (sizeof (zfs_object_ace_t));
case ALLOW:
case DENY:
entry_type =
(((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
if (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (sizeof (zfs_ace_hdr_t));
- /* FALLTHROUGH */
+ fallthrough;
default:
return (sizeof (zfs_ace_t));
}
}
static size_t
zfs_ace_fuid_abstract_size(void)
{
return (sizeof (zfs_ace_hdr_t));
}
static int
zfs_ace_fuid_mask_off(void)
{
return (offsetof(zfs_ace_hdr_t, z_access_mask));
}
static int
zfs_ace_fuid_data(void *acep, void **datap)
{
zfs_ace_t *zacep = acep;
zfs_object_ace_t *zobjp;
switch (zacep->z_hdr.z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjp = acep;
*datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
default:
*datap = NULL;
return (0);
}
}
static acl_ops_t zfs_acl_fuid_ops = {
zfs_ace_fuid_get_mask,
zfs_ace_fuid_set_mask,
zfs_ace_fuid_get_flags,
zfs_ace_fuid_set_flags,
zfs_ace_fuid_get_type,
zfs_ace_fuid_set_type,
zfs_ace_fuid_get_who,
zfs_ace_fuid_set_who,
zfs_ace_fuid_size,
zfs_ace_fuid_abstract_size,
zfs_ace_fuid_mask_off,
zfs_ace_fuid_data
};
/*
* The following three functions are provided for compatibility with
* older ZPL versions in order to determine if the file used to have
* an external ACL and what version of ACL previously existed on the
* file. Would really be nice to not need this, sigh.
*/
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
int error;
if (zp->z_is_sa)
return (0);
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
/*
* after upgrade the SA_ZPL_ZNODE_ACL should have been
* removed
*/
VERIFY(zp->z_is_sa);
VERIFY3S(error, ==, ENOENT);
return (0);
}
}
/*
* Determine size of ACL in bytes
*
* This is more complicated than it should be since we have to deal
* with old external ACLs.
*/
static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) {
*aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size);
*aclcount = aclphys->z_acl_size;
} else {
*aclsize = aclphys->z_acl_size;
*aclcount = aclphys->z_acl_count;
}
}
return (0);
}
int
zfs_znode_acl_version(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
else {
int error;
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(zp->z_zfsvfs),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
/*
* After upgrade SA_ZPL_ZNODE_ACL should have
* been removed.
*/
VERIFY(zp->z_is_sa);
VERIFY3S(error, ==, ENOENT);
return (ZFS_ACL_VERSION_FUID);
}
}
}
static int
zfs_acl_version(int version)
{
if (version < ZPL_VERSION_FUID)
return (ZFS_ACL_VERSION_INITIAL);
else
return (ZFS_ACL_VERSION_FUID);
}
static int
zfs_acl_version_zp(znode_t *zp)
{
return (zfs_acl_version(zp->z_zfsvfs->z_version));
}
zfs_acl_t *
zfs_acl_alloc(int vers)
{
zfs_acl_t *aclp;
aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
aclp->z_ops = &zfs_acl_fuid_ops;
else
aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
zfs_acl_node_t *aclnode;
aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
if (bytes) {
aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
}
return (aclnode);
}
static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
if (aclnode->z_allocsize)
kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
kmem_free(aclnode, sizeof (zfs_acl_node_t));
}
static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
zfs_acl_node_t *aclnode;
while ((aclnode = list_head(&aclp->z_acl))) {
list_remove(&aclp->z_acl, aclnode);
zfs_acl_node_free(aclnode);
}
aclp->z_acl_count = 0;
aclp->z_acl_bytes = 0;
}
void
zfs_acl_free(zfs_acl_t *aclp)
{
zfs_acl_release_nodes(aclp);
list_destroy(&aclp->z_acl);
kmem_free(aclp, sizeof (zfs_acl_t));
}
static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
zfs_ace_valid(vtype_t obj_type, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
*/
if (!zfs_acl_valid_ace_type(type, iflags))
return (B_FALSE);
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (aclp->z_version < ZFS_ACL_VERSION_FUID)
return (B_FALSE);
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
}
/*
* next check inheritance level flags
*/
if (obj_type == VDIR &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
if ((iflags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE)) == 0) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
zfs_acl_node_t *aclnode;
ASSERT3P(aclp, !=, NULL);
if (start == NULL) {
aclnode = list_head(&aclp->z_acl);
if (aclnode == NULL)
return (NULL);
aclp->z_next_ace = aclnode->z_acldata;
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
}
aclnode = aclp->z_curr_node;
if (aclnode == NULL)
return (NULL);
if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
aclnode = list_next(&aclp->z_acl, aclnode);
if (aclnode == NULL)
return (NULL);
else {
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
aclp->z_next_ace = aclnode->z_acldata;
}
}
if (aclnode->z_ace_idx < aclnode->z_ace_count) {
void *acep = aclp->z_next_ace;
size_t ace_size;
/*
* Make sure we don't overstep our bounds
*/
ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
*iflags = aclp->z_ops->ace_flags_get(acep);
*type = aclp->z_ops->ace_type_get(acep);
*access_mask = aclp->z_ops->ace_mask_get(acep);
*who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((void *)acep);
}
return (NULL);
}
/*ARGSUSED*/
static uint64_t
zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
uint16_t *flags, uint16_t *type, uint32_t *mask)
{
zfs_acl_t *aclp = datap;
zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie;
uint64_t who;
acep = zfs_acl_next_ace(aclp, acep, &who, mask,
flags, type);
return ((uint64_t)(uintptr_t)acep);
}
/*
* Copy ACE to internal ZFS format.
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, vtype_t obj_type, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
int i;
uint16_t entry_type;
zfs_ace_t *aceptr = z_acl;
ace_t *acep = datap;
zfs_object_ace_t *zobjacep;
ace_object_t *aceobjp;
for (i = 0; i != aclcnt; i++) {
aceptr->z_hdr.z_access_mask = acep->a_access_mask;
aceptr->z_hdr.z_flags = acep->a_flags;
aceptr->z_hdr.z_type = acep->a_type;
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
sizeof (aceobjp->a_obj_type));
bcopy(aceobjp->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
default:
acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
uint32_t access_mask;
uint16_t iflags, type;
zfs_ace_hdr_t *zacep = NULL;
ace_t *acep = datap;
ace_object_t *objacep;
zfs_object_ace_t *zobjacep;
size_t ace_size;
uint16_t entry_type;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (filter) {
continue;
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
bcopy(zobjacep->z_object_type,
objacep->a_obj_type,
sizeof (zobjacep->z_object_type));
bcopy(zobjacep->z_inherit_type,
objacep->a_inherit_obj_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
default:
ace_size = sizeof (ace_t);
break;
}
entry_type = (iflags & ACE_TYPE_FLAGS);
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
acep->a_who = (uid_t)(int64_t)who;
}
acep->a_access_mask = access_mask;
acep->a_flags = iflags;
acep->a_type = type;
acep = (ace_t *)((caddr_t)acep + ace_size);
}
}
static int
zfs_copy_ace_2_oldace(vtype_t obj_type, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
zfs_oldace_t *aceptr = z_acl;
for (i = 0; i != aclcnt; i++, aceptr++) {
aceptr->z_access_mask = acep[i].a_access_mask;
aceptr->z_type = acep[i].a_type;
aceptr->z_flags = acep[i].a_flags;
aceptr->z_fuid = acep[i].a_who;
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_type, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* convert old ACL format to new
*/
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
zfs_oldace_t *oldaclp;
int i;
uint16_t type, iflags;
uint32_t access_mask;
uint64_t who;
void *cookie = NULL;
zfs_acl_node_t *newaclnode;
ASSERT3U(aclp->z_version, ==, ZFS_ACL_VERSION_INITIAL);
/*
* First create the ACE in a contiguous piece of memory
* for zfs_copy_ace_2_fuid().
*
* We only convert an ACL once, so this won't happen
* every time.
*/
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
&access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
oldaclp[i++].z_access_mask = access_mask;
}
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = &zfs_acl_fuid_ops;
VERIFY0(zfs_copy_ace_2_fuid(zp->z_zfsvfs, ZTOV(zp)->v_type, aclp,
oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr));
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
/*
* Release all previous ACL nodes
*/
zfs_acl_release_nodes(aclp);
list_insert_head(&aclp->z_acl, newaclnode);
aclp->z_acl_bytes = newaclnode->z_size;
aclp->z_acl_count = newaclnode->z_ace_count;
}
/*
* Convert unix access mask to v4 access mask
*/
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
uint32_t new_mask = 0;
if (access_mask & S_IXOTH)
new_mask |= ACE_EXECUTE;
if (access_mask & S_IWOTH)
new_mask |= ACE_WRITE_DATA;
if (access_mask & S_IROTH)
new_mask |= ACE_READ_DATA;
return (new_mask);
}
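/*
 * Editor's illustrative example (not part of the original source): this
 * helper only inspects the low three bits, so callers shift the class of
 * interest down first. For a file with mode 0750, zfs_zaccess_rwx() below
 * passes (0750 >> 6) == 07 and gets back
 *
 *	ACE_READ_DATA | ACE_WRITE_DATA | ACE_EXECUTE
 *
 * while mode 0440 for the same owner class (04) maps to just ACE_READ_DATA.
 */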
static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
aclp->z_ops->ace_mask_set(acep, access_mask);
aclp->z_ops->ace_type_set(acep, access_type);
aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
aclp->z_ops->ace_who_set(acep, fuid);
}
/*
* Determine mode of file based on ACL.
*/
uint64_t
zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
uint64_t *pflags, uint64_t fuid, uint64_t fgid)
{
int entry_type;
mode_t mode;
mode_t seen = 0;
zfs_ace_hdr_t *acep = NULL;
uint64_t who;
uint16_t iflags, type;
uint32_t access_mask;
boolean_t an_exec_denied = B_FALSE;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
while ((acep = zfs_acl_next_ace(aclp, acep, &who,
&access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* Skip over any inherit_only ACEs
*/
if (iflags & ACE_INHERIT_ONLY_ACE)
continue;
if (entry_type == ACE_OWNER || (entry_type == 0 &&
who == fuid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRUSR))) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWUSR))) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXUSR))) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
} else if (entry_type == OWNING_GROUP ||
(entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRGRP))) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWGRP))) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXGRP))) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
} else if (entry_type == ACE_EVERYONE) {
if ((access_mask & ACE_READ_DATA)) {
if (!(seen & S_IRUSR)) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if (!(seen & S_IRGRP)) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if (!(seen & S_IROTH)) {
seen |= S_IROTH;
if (type == ALLOW) {
mode |= S_IROTH;
}
}
}
if ((access_mask & ACE_WRITE_DATA)) {
if (!(seen & S_IWUSR)) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if (!(seen & S_IWGRP)) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if (!(seen & S_IWOTH)) {
seen |= S_IWOTH;
if (type == ALLOW) {
mode |= S_IWOTH;
}
}
}
if ((access_mask & ACE_EXECUTE)) {
if (!(seen & S_IXUSR)) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
if (!(seen & S_IXGRP)) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
if (!(seen & S_IXOTH)) {
seen |= S_IXOTH;
if (type == ALLOW) {
mode |= S_IXOTH;
}
}
}
} else {
/*
* Only care if this IDENTIFIER_GROUP or
* USER ACE denies execute access to someone;
* the mode is not affected
*/
if ((access_mask & ACE_EXECUTE) && type == DENY)
an_exec_denied = B_TRUE;
}
}
/*
* Failure to allow is effectively a deny, so execute permission
* is denied if it was never mentioned or if we explicitly
* weren't allowed it.
*/
if (!an_exec_denied &&
((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
(mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
an_exec_denied = B_TRUE;
if (an_exec_denied)
*pflags &= ~ZFS_NO_EXECS_DENIED;
else
*pflags |= ZFS_NO_EXECS_DENIED;
return (mode);
}
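/*
 * Editor's worked example (a sketch, not from the original source): for a
 * trivial ACL of
 *
 *	owner@:read_data/write_data:allow
 *	everyone@:read_data:allow
 *
 * the owner@ ACE sets S_IRUSR and S_IWUSR, and the everyone@ ACE sets
 * S_IRGRP and S_IROTH (S_IRUSR is already in "seen", so it is skipped).
 * No ACE ever allows ACE_EXECUTE, so ALL_MODE_EXECS is not satisfied and
 * ZFS_NO_EXECS_DENIED is cleared from *pflags.
 */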
/*
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
int
zfs_acl_node_read(znode_t *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize;
int acl_count;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
if (zp->z_acl_cached && !will_modify) {
*aclpp = zp->z_acl_cached;
return (0);
}
version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
&acl_count, &znode_acl)) != 0) {
goto done;
}
aclp = zfs_acl_alloc(version);
aclp->z_acl_count = acl_count;
aclp->z_acl_bytes = aclsize;
aclnode = zfs_acl_node_alloc(aclsize);
aclnode->z_ace_count = aclp->z_acl_count;
aclnode->z_size = aclsize;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
error = dmu_read(zp->z_zfsvfs->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
aclnode->z_size);
}
} else {
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zp->z_zfsvfs),
aclnode->z_acldata, aclnode->z_size);
}
if (error != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
done:
return (error);
}
/*ARGSUSED*/
void
zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
boolean_t start, void *userdata)
{
zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata;
if (start) {
cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl);
} else {
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
cb->cb_acl_node);
}
*dataptr = cb->cb_acl_node->z_acldata;
*length = cb->cb_acl_node->z_size;
}
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
if (zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if ((error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE)) == 0)
zp->z_mode = zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, zp->z_uid, zp->z_gid);
return (error);
}
/*
* common code for setting ACLs.
*
* This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl.
* zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's
* already checked the acl and knows whether to inherit.
*/
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
zfs_acl_phys_t acl_phys;
if (zp->z_zfsvfs->z_replay == B_FALSE) {
ASSERT_VOP_IN_SEQC(ZTOV(zp));
}
mode = zp->z_mode;
mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
zp->z_uid, zp->z_gid);
zp->z_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
/*
* Upgrade needed?
*/
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT3U(aclp->z_version, >=, ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
}
/*
* Arrgh, we have to handle the old on-disk format
* as well as newer (preferred) SA format.
*/
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
aoid = acl_phys.z_acl_extern_obj;
if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
/*
* If ACL was previously external and we are now
* converting to new ACL format then release old
* ACL object and create a new one.
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
} else {
void *start = acl_phys.z_ace_data;
/*
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
acl_phys.z_acl_extern_obj = 0;
}
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
}
/*
* If Old version then swap count/bytes to match old
* layout of znode_acl_phys_t.
*/
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
acl_phys.z_acl_size = aclp->z_acl_count;
acl_phys.z_acl_count = aclp->z_acl_bytes;
} else {
acl_phys.z_acl_size = aclp->z_acl_bytes;
acl_phys.z_acl_count = aclp->z_acl_count;
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
/*
* Replace ACL wide bits, but first clear them.
*/
zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS;
zp->z_pflags |= aclp->z_hints;
if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
zp->z_pflags |= ZFS_ACL_TRIVIAL;
zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
static void
zfs_acl_chmod(vtype_t vtype, uint64_t mode, boolean_t split, boolean_t trim,
zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
size_t abstract_size = aclp->z_ops->ace_abstract_size();
void *zacep;
boolean_t isdir;
trivial_acl_t masks;
new_count = new_bytes = 0;
isdir = (vtype == VDIR);
acl_trivial_access_masks((mode_t)mode, isdir, &masks);
newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes);
zacep = newnode->z_acldata;
if (masks.allow0) {
zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny1) {
zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny2) {
zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* ACEs used to represent the file mode may be divided
* into an equivalent pair of inherit-only and regular
* ACEs, if they are inheritable.
* Skip regular ACEs, which are replaced by the new mode.
*/
if (split && (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)) {
if (!isdir || !(iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
continue;
/*
* We preserve owner@, group@, or everyone@
* permissions, if they are inheritable, by
* copying them to inherit_only ACEs. This
* prevents inheritable permissions from being
* altered along with the file mode.
*/
iflags |= ACE_INHERIT_ONLY_ACE;
}
/*
* If this ACL has any inheritable ACEs, mark that in
* the hints (which are later masked into the pflags)
* so create knows to do inheritance.
*/
if (isdir && (iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((type != ALLOW && type != DENY) ||
(iflags & ACE_INHERIT_ONLY_ACE)) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
break;
}
} else {
/*
* Limit permissions granted by ACEs to be no greater
* than permissions of the requested group mode.
* Applies when the "aclmode" property is set to
* "groupmask".
*/
if ((type == ALLOW) && trim)
access_mask &= masks.group;
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
}
zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE);
new_count += 3;
new_bytes += abstract_size * 3;
zfs_acl_release_nodes(aclp);
aclp->z_acl_count = new_count;
aclp->z_acl_bytes = new_bytes;
newnode->z_ace_count = new_count;
newnode->z_size = new_bytes;
list_insert_tail(&aclp->z_acl, newnode);
}
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
int error = 0;
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_DISCARD)
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
else
error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE);
if (error == 0) {
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(ZTOV(zp)->v_type, mode, B_TRUE,
(zp->z_zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp);
}
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Should ACE be inherited?
*/
static int
zfs_ace_can_use(vtype_t vtype, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
if ((vtype == VDIR) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
return (!((vtype == VDIR) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
/*
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, vtype_t vtype, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep = NULL;
void *acep;
zfs_acl_node_t *aclnode;
zfs_acl_t *aclp = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t iflags, newflags, type;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
uint_t aclinherit;
boolean_t isdir = (vtype == VDIR);
boolean_t isreg = (vtype == VREG);
*need_chmod = B_TRUE;
aclp = zfs_acl_alloc(paclp->z_version);
aclinherit = zfsvfs->z_acl_inherit;
if (aclinherit == ZFS_ACL_DISCARD || vtype == VLNK)
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
*/
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
/*
* Check if ACE is inheritable by this vnode
*/
if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) ||
!zfs_ace_can_use(vtype, iflags))
continue;
/*
* If owner@, group@, or everyone@ is inheritable
* then zfs_acl_chmod() isn't needed.
*/
if ((aclinherit == ZFS_ACL_PASSTHROUGH ||
aclinherit == ZFS_ACL_PASSTHROUGH_X) &&
((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
((iflags & OWNING_GROUP) == OWNING_GROUP)) &&
(isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE))))
*need_chmod = B_FALSE;
/*
* Strip inherited execute permission from file if
* not in mode
*/
if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW &&
!isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) {
access_mask &= ~ACE_EXECUTE;
}
/*
* Strip write_acl and write_owner from permissions
* when inheriting an ACE
*/
if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) {
access_mask &= ~RESTRICTED_CLEAR;
}
ace_size = aclp->z_ops->ace_size(pacep);
aclnode = zfs_acl_node_alloc(ace_size);
list_insert_tail(&aclp->z_acl, aclnode);
acep = aclnode->z_acldata;
zfs_set_ace(aclp, acep, access_mask, type,
who, iflags|ACE_INHERITED_ACE);
/*
* Copy special opaque data if any
*/
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
data2sz = aclp->z_ops->ace_data(acep, &data2);
VERIFY3U(data2sz, ==, data1sz);
bcopy(data1, data2, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
newflags = aclp->z_ops->ace_flags_get(acep);
/*
* If ACE is not to be inherited further, or if the vnode is
* not a directory, remove all inheritance flags
*/
if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) {
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
continue;
}
/*
* This directory has an inheritable ACE
*/
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* If only FILE_INHERIT is set then turn on
* inherit_only
*/
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
if (zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
aclp->z_acl_count != 0) {
*need_chmod = B_FALSE;
}
return (aclp);
}
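/*
 * Editor's illustrative example (a sketch of the rules above): a parent
 * ACE carrying only ACE_FILE_INHERIT_ACE is copied to a newly created
 * subdirectory with ACE_INHERIT_ONLY_ACE turned on, so it grants nothing
 * on the directory itself but is still propagated to the directory's
 * files; the same ACE copied to a newly created plain file has all
 * inheritance flags stripped and simply takes effect. Both copies are
 * marked ACE_INHERITED_ACE.
 */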
/*
* Create file system object initial permissions
* including inheritable ACEs.
* Also, create FUIDs for owner and group.
*/
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zfs_acl_t *paclp;
gid_t gid;
boolean_t need_chmod = B_TRUE;
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
if ((flag & IS_ROOT_NODE) == 0) {
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(dzp), __func__);
} else
ASSERT3P(dzp->z_vnode, ==, NULL);
bzero(acl_ids, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = MAKEIMODE(vap->va_type, vap->va_mode);
if (vsecp)
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_type, vsecp, cr,
&acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (vap->va_type == VDIR))) {
acl_ids->z_fuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr,
ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr,
ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
if (acl_ids->z_fgid != dzp->z_gid &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
}
if (acl_ids->z_fgid == 0) {
char *domain;
uint32_t rid;
acl_ids->z_fgid = dzp->z_gid;
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain =
zfs_fuid_idx_domain(&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
domain, rid, FUID_INDEX(acl_ids->z_fgid),
acl_ids->z_fgid, ZFS_GROUP);
}
}
}
/*
* If we're creating a directory, and the parent directory has the
* set-GID bit set, set it on the new directory.
* Otherwise, if the user is neither privileged nor a member of the
* file's new group, clear the file's set-GID bit.
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
(vap->va_type == VDIR)) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
secpolicy_vnode_setids_setgids(ZTOV(dzp), cr, gid) != 0)
acl_ids->z_mode &= ~S_ISGID;
}
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY0(zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_type, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
zfs_acl_alloc(zfs_acl_version_zp(dzp));
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
if (vap->va_type == VDIR)
acl_ids->z_aclp->z_hints |=
ZFS_ACL_AUTO_INHERIT;
if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X)
trim = B_TRUE;
zfs_acl_chmod(vap->va_type, acl_ids->z_mode, B_FALSE,
trim, acl_ids->z_aclp);
}
}
if (inherited || vsecp) {
acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode,
acl_ids->z_aclp, &acl_ids->z_aclp->z_hints,
acl_ids->z_fuid, acl_ids->z_fgid);
if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0)
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
return (0);
}
/*
* Free ACL and fuid_infop, but not the acl_ids structure
*/
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
if (acl_ids->z_aclp)
zfs_acl_free(acl_ids->z_aclp);
if (acl_ids->z_fuidp)
zfs_fuid_info_free(acl_ids->z_fuidp);
acl_ids->z_aclp = NULL;
acl_ids->z_fuidp = NULL;
}
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
(projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
int error;
int count = 0;
int largeace = 0;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Scan ACL to determine number of ACEs
*/
if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) {
void *zacep = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t type, iflags;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
largeace++;
continue;
default:
count++;
}
}
vsecp->vsa_aclcnt = count;
} else
count = (int)aclp->z_acl_count;
if (mask & VSA_ACECNT) {
vsecp->vsa_aclcnt = count;
}
if (mask & VSA_ACE) {
size_t aclsz;
aclsz = count * sizeof (ace_t) +
sizeof (ace_object_t) * largeace;
vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
zfs_copy_fuid_2_ace(zp->z_zfsvfs, aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
void *start = vsecp->vsa_aclentp;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
ASSERT3U((caddr_t)start - (caddr_t)vsecp->vsa_aclentp,
==, aclp->z_acl_bytes);
}
}
if (mask & VSA_ACE_ACLFLAGS) {
vsecp->vsa_aclflags = 0;
if (zp->z_pflags & ZFS_ACL_DEFAULTED)
vsecp->vsa_aclflags |= ACL_DEFAULTED;
if (zp->z_pflags & ZFS_ACL_PROTECTED)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
}
mutex_exit(&zp->z_acl_lock);
return (0);
}
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_type,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
zfs_acl_node_t *aclnode;
int aclcnt = vsecp->vsa_aclcnt;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
if ((error = zfs_copy_ace_2_oldace(obj_type, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_type, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
}
aclp->z_acl_bytes = aclnode->z_size;
aclnode->z_ace_count = aclcnt;
aclp->z_acl_count = aclcnt;
list_insert_head(&aclp->z_acl, aclnode);
/*
* If flags are being set then add them to z_hints
*/
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
if (vsecp->vsa_aclflags & ACL_PROTECTED)
aclp->z_hints |= ZFS_ACL_PROTECTED;
if (vsecp->vsa_aclflags & ACL_DEFAULTED)
aclp->z_hints |= ZFS_ACL_DEFAULTED;
if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
}
*zaclp = aclp;
return (0);
}
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOV(zp)->v_type, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
/*
* If ACL wide flags aren't being set then preserve any
* existing flags.
*/
if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
aclp->z_hints |=
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
mutex_enter(&zp->z_acl_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
* upgrading then take out necessary DMU holds
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
}
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_acl_free(aclp);
return (error);
}
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(error);
ASSERT3P(zp->z_acl_cached, ==, NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflict with dataset
* attributes, otherwise an appropriate errno is returned.
*/
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) &&
(zp->z_zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) &&
(!IS_DEVVP(ZTOV(zp)) ||
(IS_DEVVP(ZTOV(zp)) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (SET_ERROR(EROFS));
}
/*
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common().
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(zp->z_pflags & ZFS_IMMUTABLE)) {
return (SET_ERROR(EPERM));
}
/*
* In FreeBSD we allow modifying a directory's content if ZFS_NOUNLINK
* (sunlnk) is set. We just don't allow directory removal, which is
* handled in zfs_zaccess_delete().
*/
if ((v4_mode & ACE_DELETE) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (EPERM);
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (SET_ERROR(EACCES));
}
return (0);
}
/*
* The primary usage of this function is to loop through all of the
* ACEs in the znode, determining what accesses of interest (AoI) to
* the caller are allowed or denied. The AoI are expressed as bits in
* the working_mode parameter. As each ACE is processed, bits covered
* by that ACE are removed from the working_mode. This removal
* facilitates two things. The first is that when the working mode is
* empty (= 0), we know we've looked at all the AoI. The second is
* that the ACE interpretation rules don't allow a later ACE to undo
* something granted or denied by an earlier ACE. Removing the
* discovered access or denial enforces this rule. At the end of
* processing the ACEs, all AoI that were found to be denied are
* placed into the working_mode, giving the caller a mask of denied
* accesses. Returns:
* 0 if all AoI granted
* EACCES if the denied mask is non-zero
* other error if abnormal failure (e.g., IO error)
*
* A secondary usage of the function is to determine if any of the
* AoI are granted. If an ACE grants any access in
* the working_mode, we immediately short circuit out of the function.
* This mode is chosen by setting anyaccess to B_TRUE. The
* working_mode is not a denied access mask upon exit if the function
* is used in this manner.
*/
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
uid_t gowner;
uid_t fowner;
zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
if (zp->z_zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(ZTOV(zp), __func__);
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
ASSERT3P(zp->z_acl_cached, !=, NULL);
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
if (ZTOV(zp)->v_type == VDIR && (iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
mask_matched = (access_mask & *working_mode);
if (!mask_matched)
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
checkit = B_FALSE;
switch (entry_type) {
case ACE_OWNER:
if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
who = gowner;
- /* FALLTHROUGH */
+ fallthrough;
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
break;
/* USER Entry */
default:
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != UID_NOBODY &&
uid == newid)
checkit = B_TRUE;
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (SET_ERROR(EIO));
}
}
if (checkit) {
if (type == DENY) {
DTRACE_PROBE3(zfs__ace__denies,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
deny_mask |= mask_matched;
} else {
DTRACE_PROBE3(zfs__ace__allows,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
if (anyaccess) {
mutex_exit(&zp->z_acl_lock);
return (0);
}
}
*working_mode &= ~mask_matched;
}
/* Are we done? */
if (*working_mode == 0)
break;
}
mutex_exit(&zp->z_acl_lock);
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
return (0);
}
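/*
 * Editor's worked example of the loop above (illustrative only): suppose
 * *working_mode == (ACE_READ_DATA | ACE_WRITE_DATA) and the ACL is
 *
 *	user:joe:write_data:deny
 *	owner@:read_data/write_data:allow
 *
 * With joe (who is also the owner) as the caller, the first ACE matches
 * ACE_WRITE_DATA, adds it to deny_mask and clears it from the working
 * mode; the second ACE then clears ACE_READ_DATA. The loop exits with
 * *working_mode == 0, the deny_mask is put back, and EACCES is returned
 * with *working_mode == ACE_WRITE_DATA, telling the caller exactly which
 * access was denied.
 */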
/*
* Return true if any access whatsoever is granted; we don't actually
* care what access is granted.
*/
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
return (secpolicy_vnode_any_access(cr, ZTOV(zp), owner) == 0);
}
return (B_TRUE);
}
static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int err;
*working_mode = v4_mode;
*check_privs = B_TRUE;
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}
if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
*check_privs = B_FALSE;
return (err);
}
/*
* The caller requested that the ACL check be skipped. This
* would only happen if the caller checked VOP_ACCESS() with a
* 32 bit ACE mask and already had the appropriate permissions.
*/
if (skipaclchk) {
*working_mode = 0;
return (0);
}
/*
* Note: ZFS_READONLY represents the "DOS R/O" attribute.
* When that flag is set, we should behave as if write access
* were not granted by anything in the ACL. In particular:
* We _must_ allow writes after opening the file r/w, then
* setting the DOS R/O attribute, and writing some more.
* (Similar to how you can write after fchmod(fd, 0444).)
*
* Therefore ZFS_READONLY is ignored in the dataset check
* above, and checked here as if part of the ACL check.
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(ZTOV(zp)->v_type != VDIR) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}
return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr));
}
static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
}
/*
* Check if VEXEC is allowed.
*
* This routine is based on zfs_fastaccesschk_execute, which has a slowpath
* calling zfs_zaccess. This would be incorrect on FreeBSD (see
* zfs_freebsd_access for the difference). Thus this variant lets the
* caller handle the slowpath (if necessary).
*
* On top of that we perform a lockless check for ZFS_NO_EXECS_DENIED.
*
* Safe access to znode_t is provided by the vnode lock.
*/
int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t is_attr;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (1);
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(ZTOV(zdp)->v_type == VDIR));
if (is_attr)
return (1);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED)
return (0);
return (1);
}
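/*
 * Editor's usage sketch (hedged, assuming a caller along the lines of the
 * zfs_freebsd_access path mentioned above; the exact call site is not
 * shown here): a lookup path can try the fast check first and fall back
 * to the full ACL walk only when it returns nonzero, e.g.
 *
 *	if (zfs_fastaccesschk_execute(zdp, cr) != 0)
 *		error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr);
 *
 * A zero return means ZFS_NO_EXECS_DENIED is set (and the znode is neither
 * quarantined nor an extended-attribute directory), so VEXEC can be
 * granted without taking the ACL lock.
 */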
/*
* Determine whether Access should be granted/denied.
*
* The least priv subsystem is always consulted as a basic privilege
* can define any form of access.
*/
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
{
uint32_t working_mode;
int error;
int is_attr;
boolean_t check_privs;
znode_t *xzp = NULL;
znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && (ZTOV(zp)->v_type == VDIR));
/*
* In FreeBSD, we don't care about permissions of individual ADS.
* Note that not checking them is not just an optimization - without
* this shortcut, EA operations may bogusly fail with EACCES.
*/
if (zp->z_pflags & ZFS_XATTR)
return (0);
owner = zfs_fuid_map_id(zp->z_zfsvfs, zp->z_uid, cr, ZFS_OWNER);
/*
* Map the bits required to the standard vnode flags VREAD|VWRITE|VEXEC
* in needed_bits. Map the bits mapped by working_mode (currently
* missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
needed_bits = 0;
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= VREAD;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= VWRITE;
if (working_mode & ACE_EXECUTE)
needed_bits |= VEXEC;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
VN_RELE(ZTOV(xzp));
return (secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
VN_RELE(ZTOV(xzp));
return (error);
}
if (error && (flags & V_APPEND)) {
error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr);
}
if (error && check_privs) {
mode_t checkmode = 0;
vnode_t *check_vp = ZTOV(check_zp);
/*
* First check for implicit owner permission on
* read_acl/read_attributes
*/
error = 0;
ASSERT3U(working_mode, !=, 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= VREAD;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= VWRITE;
if (working_mode & ACE_EXECUTE)
checkmode |= VEXEC;
error = secpolicy_vnode_access2(cr, check_vp, owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
error = secpolicy_vnode_chown(check_vp, cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
error = secpolicy_vnode_setdac(check_vp, cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(check_vp, cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
error = secpolicy_vnode_chown(check_vp, cr, owner);
}
if (error == 0) {
/*
* See if any bits other than those already checked
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
error = secpolicy_vnode_access2(cr, ZTOV(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
VN_RELE(ZTOV(xzp));
return (error);
}
/*
* Translate traditional unix VREAD/VWRITE/VEXEC mode into
* NFSv4-style ZFS ACL format and call zfs_zaccess()
*/
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr)
{
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr));
}
/*
* Access function for secpolicy_vnode_setattr
*/
int
zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr)
{
int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr));
}
static int
zfs_delete_final_check(znode_t *zp, znode_t *dzp,
mode_t available_perms, cred_t *cr)
{
int error;
uid_t downer;
downer = zfs_fuid_map_id(dzp->z_zfsvfs, dzp->z_uid, cr, ZFS_OWNER);
error = secpolicy_vnode_access2(cr, ZTOV(dzp),
downer, available_perms, VWRITE|VEXEC);
if (error == 0)
error = zfs_sticky_remove_access(dzp, zp, cr);
return (error);
}
/*
* Determine whether Access should be granted/denied, without
* consulting least priv subsystem.
*
* The following chart is the recommended NFSv4 enforcement for
* ability to delete an object.
*
* -------------------------------------------------------
* | Parent Dir | Target Object Permissions |
* | permissions | |
* -------------------------------------------------------
* | | ACL Allows | ACL Denies| Delete |
* | | Delete | Delete | unspecified|
* -------------------------------------------------------
* | ACL Allows | Permit | Permit | Permit |
* | DELETE_CHILD | |
* -------------------------------------------------------
* | ACL Denies | Permit | Deny | Deny |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL specifies | | | |
* | only allow | Permit | Permit | Permit |
* | write and | | | |
* | execute | | | |
* -------------------------------------------------------
* | ACL denies | | | |
* | write and | Permit | Deny | Deny |
* | execute | | | |
* -------------------------------------------------------
* ^
* |
* No search privilege, can't even look up file?
*
*/
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
{
uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0;
int dzp_error, zp_error;
mode_t available_perms;
boolean_t dzpcheck_privs = B_TRUE;
boolean_t zpcheck_privs = B_TRUE;
/*
* We want specific DELETE permissions to
* take precedence over WRITE/EXECUTE. We don't
* want an ACL such as this to mess us up.
* user:joe:write_data:deny,user:joe:delete:allow
*
* However, deny permissions may ultimately be overridden
* by secpolicy_vnode_access().
*
* We will ask for all of the necessary permissions and then
* look at the working modes from the directory and target object
* to determine what was found.
*/
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (SET_ERROR(EPERM));
/*
* First row
* If the directory permissions allow the delete, we are done.
*/
if ((dzp_error = zfs_zaccess_common(dzp, ACE_DELETE_CHILD,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr)) == 0)
return (0);
/*
* If target object has delete permission then we are done
*/
if ((zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
&zpcheck_privs, B_FALSE, cr)) == 0)
return (0);
ASSERT(dzp_error);
ASSERT(zp_error);
if (!dzpcheck_privs)
return (dzp_error);
if (!zpcheck_privs)
return (zp_error);
/*
* Second row
*
* If directory returns EACCES then delete_child was denied
* due to deny delete_child. In this case send the request through
* secpolicy_vnode_remove(). We don't use zfs_delete_final_check()
* since that *could* allow the delete based on write/execute permission
* and we want delete permissions to override write/execute.
*/
if (dzp_error == EACCES) {
/* XXXPJD: s/dzp/zp/ ? */
return (secpolicy_vnode_remove(ZTOV(dzp), cr));
}
/*
* Third row
* We only need to see if we have write/execute on the directory.
*/
dzp_error = zfs_zaccess_common(dzp, ACE_EXECUTE|ACE_WRITE_DATA,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr);
if (dzp_error != 0 && !dzpcheck_privs)
return (dzp_error);
/*
* Fourth row
*/
available_perms = (dzp_working_mode & ACE_WRITE_DATA) ? 0 : VWRITE;
available_perms |= (dzp_working_mode & ACE_EXECUTE) ? 0 : VEXEC;
return (zfs_delete_final_check(zp, dzp, available_perms, cr));
}
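/*
 * Editor's worked example against the chart above (illustrative only):
 * with
 *
 *	user:joe:delete_child:deny	on the parent directory, and
 *	user:joe:delete:allow		on the target object,
 *
 * the first zfs_zaccess_common() call fails (DELETE_CHILD is denied) but
 * the second succeeds, so the removal is permitted: the specific allow of
 * DELETE on the object overrides the parent's deny of DELETE_CHILD, which
 * is the "ACL Denies DELETE_CHILD / ACL Allows Delete" cell of the chart.
 */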
int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr)
{
int add_perm;
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
add_perm = (ZTOV(szp)->v_type == VDIR) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* Rename permissions are a combination of delete permission +
* add file/subdir permission.
*
* BSD operating systems also require write permission
* on the directory being moved from one parent directory
* to another.
*/
if (ZTOV(szp)->v_type == VDIR && ZTOV(sdzp) != ZTOV(tdzp)) {
if ((error = zfs_zaccess(szp, ACE_WRITE_DATA, 0, B_FALSE, cr)))
return (error);
}
/*
* first make sure we do the delete portion.
*
* If that succeeds then check for add_file/add_subdir permissions
*/
if ((error = zfs_zaccess_delete(sdzp, szp, cr)))
return (error);
/*
* If we have a tzp, see if we can delete it.
*/
if (tzp && (error = zfs_zaccess_delete(tdzp, tzp, cr)))
return (error);
/*
* Now check for add permissions
*/
error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr);
return (error);
}
diff --git a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
index 61ff072b3fc6..2b45f7ae3b30 100644
--- a/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
+++ b/sys/contrib/openzfs/module/os/freebsd/zfs/zfs_vnops_os.c
@@ -1,6218 +1,6218 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/endian.h>
#include <sys/vm.h>
#include <sys/vnode.h>
#if __FreeBSD_version >= 1300102
#include <sys/smr.h>
#endif
#include <sys/dirent.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/atomic.h>
#include <sys/namei.h>
#include <sys/mman.h>
#include <sys/cmn_err.h>
#include <sys/kdb.h>
#include <sys/sysproto.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/filio.h>
#include <sys/sid.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/sched.h>
#include <sys/acl.h>
#include <sys/vmmeter.h>
#include <vm/vm_param.h>
#include <sys/zil.h>
#include <sys/zfs_vnops.h>
#include <vm/vm_object.h>
#include <sys/extattr.h>
#include <sys/priv.h>
#ifndef VN_OPEN_INVFS
#define VN_OPEN_INVFS 0x0
#endif
VFS_SMR_DECLARE;
#if __FreeBSD_version >= 1300047
#define vm_page_wire_lock(pp)
#define vm_page_wire_unlock(pp)
#else
#define vm_page_wire_lock(pp) vm_page_lock(pp)
#define vm_page_wire_unlock(pp) vm_page_unlock(pp)
#endif
#ifdef DEBUG_VFS_LOCKS
#define VNCHECKREF(vp) \
VNASSERT((vp)->v_holdcnt > 0 && (vp)->v_usecount > 0, vp, \
("%s: wrong ref counts", __func__));
#else
#define VNCHECKREF(vp)
#endif
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
* This is done while avoiding races by using ZFS_ENTER(zfsvfs).
* A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
* must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) VN_RELE() should always be the last thing except for zil_commit()
* (if necessary) and ZFS_EXIT(). This is for 3 reasons:
* First, if it's the last reference, the vnode/znode
* can be freed, so the zp may point to freed memory. Second, the last
* reference will call zfs_zinactive(), which may induce a lot of work --
* pushing cached pages (which acquires range locks) and syncing out
* cached atime changes. Third, zfs_zinactive() may require a new tx,
* which could deadlock the system if you were already holding one.
* If you must call VN_RELE() within a tx then use VN_RELE_ASYNC().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
* If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
* ZFS_ENTER(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lookup(&dl, ...) // lock directory entry (may VN_HOLD())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* VN_RELE(...); // release held vnodes
* zil_commit(zilog, foid); // synchronous when necessary
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
/* ARGSUSED */
static int
zfs_open(vnode_t **vpp, int flag, cred_t *cr)
{
znode_t *zp = VTOZ(*vpp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((flag & FWRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & FAPPEND) == 0)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/* Keep a count of the synchronous opens in the znode */
if (flag & (FSYNC | FDSYNC))
atomic_inc_32(&zp->z_sync_cnt);
ZFS_EXIT(zfsvfs);
return (0);
}
/* ARGSUSED */
static int
zfs_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/* Decrement the synchronous opens in the znode */
if ((flag & (FSYNC | FDSYNC)) && (count == 1))
atomic_dec_32(&zp->z_sync_cnt);
ZFS_EXIT(zfsvfs);
return (0);
}
/* ARGSUSED */
static int
zfs_ioctl(vnode_t *vp, ulong_t com, intptr_t data, int flag, cred_t *cred,
int *rvalp)
{
loff_t off;
int error;
switch (com) {
case _FIOFFS:
{
return (0);
/*
* The following two ioctls are used by bfu. Faking out,
* necessary to avoid bfu errors.
*/
}
case _FIOGDIO:
case _FIOSDIO:
{
return (0);
}
case F_SEEK_DATA:
case F_SEEK_HOLE:
{
off = *(offset_t *)data;
/* offset parameter is in/out */
error = zfs_holey(VTOZ(vp), com, &off);
if (error)
return (error);
*(offset_t *)data = off;
return (0);
}
}
return (SET_ERROR(ENOTTY));
}
static vm_page_t
page_busy(vnode_t *vp, int64_t start, int64_t off, int64_t nbytes)
{
vm_object_t obj;
vm_page_t pp;
int64_t end;
/*
* At present vm_page_clear_dirty extends the cleared range to DEV_BSIZE
* aligned boundaries, if the range is not aligned. As a result a
* DEV_BSIZE subrange with partially dirty data may get marked as clean.
* It may happen that all DEV_BSIZE subranges are marked clean and thus
* the whole page would be considered clean despite having some
* dirty data.
* For this reason we should shrink the range to DEV_BSIZE aligned
* boundaries before calling vm_page_clear_dirty.
*/
end = rounddown2(off + nbytes, DEV_BSIZE);
off = roundup2(off, DEV_BSIZE);
nbytes = end - off;
obj = vp->v_object;
zfs_vmobject_assert_wlocked_12(obj);
#if __FreeBSD_version < 1300050
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
vm_page_sbusy(pp);
} else if (pp != NULL) {
ASSERT(!pp->valid);
pp = NULL;
}
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
break;
}
#else
vm_page_grab_valid_unlocked(&pp, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_SBUSY | VM_ALLOC_NORMAL |
VM_ALLOC_IGN_SBUSY);
if (pp != NULL) {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_object_pip_add(obj, 1);
pmap_remove_write(pp);
if (nbytes != 0)
vm_page_clear_dirty(pp, off, nbytes);
}
#endif
return (pp);
}
static void
page_unbusy(vm_page_t pp)
{
vm_page_sunbusy(pp);
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(pp->object);
#else
vm_object_pip_subtract(pp->object, 1);
#endif
}
#if __FreeBSD_version > 1300051
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t m;
obj = vp->v_object;
vm_page_grab_valid_unlocked(&m, obj, OFF_TO_IDX(start),
VM_ALLOC_NOCREAT | VM_ALLOC_WIRED | VM_ALLOC_IGN_SBUSY |
VM_ALLOC_NOBUSY);
return (m);
}
#else
static vm_page_t
page_hold(vnode_t *vp, int64_t start)
{
vm_object_t obj;
vm_page_t pp;
obj = vp->v_object;
zfs_vmobject_assert_wlocked(obj);
for (;;) {
if ((pp = vm_page_lookup(obj, OFF_TO_IDX(start))) != NULL &&
pp->valid) {
if (vm_page_xbusied(pp)) {
/*
* Reference the page before unlocking and
* sleeping so that the page daemon is less
* likely to reclaim it.
*/
vm_page_reference(pp);
vm_page_lock(pp);
zfs_vmobject_wunlock(obj);
vm_page_busy_sleep(pp, "zfsmwb", true);
zfs_vmobject_wlock(obj);
continue;
}
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_wire_lock(pp);
vm_page_hold(pp);
vm_page_wire_unlock(pp);
} else
pp = NULL;
break;
}
return (pp);
}
#endif
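/*
 * Release the wiring (newer FreeBSD) or hold (older) taken by page_hold().
 */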
static void
page_unhold(vm_page_t pp)
{
vm_page_wire_lock(pp);
#if __FreeBSD_version >= 1300035
vm_page_unwire(pp, PQ_ACTIVE);
#else
vm_page_unhold(pp);
#endif
vm_page_wire_unlock(pp);
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
void
update_pages(znode_t *zp, int64_t start, int len, objset_t *os)
{
vm_object_t obj;
struct sf_buf *sf;
vnode_t *vp = ZTOV(zp);
caddr_t va;
int off;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300041
vm_object_pip_add(obj, 1);
#endif
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
int nbytes = imin(PAGESIZE - off, len);
if ((pp = page_busy(vp, start, off, nbytes)) != NULL) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
(void) dmu_read(os, zp->z_id, start + off, nbytes,
va + off, DMU_READ_PREFETCH);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unbusy(pp);
}
len -= nbytes;
off = 0;
}
#if __FreeBSD_version >= 1300041
vm_object_pip_wakeup(obj);
#else
vm_object_pip_wakeupn(obj, 0);
#endif
zfs_vmobject_wunlock_12(obj);
}
/*
* Read with UIO_NOCOPY flag means that sendfile(2) requests
* ZFS to populate a range of page cache pages with data.
*
* NOTE: this function could be optimized to pre-allocate
* all pages in advance, drain exclusive busy on all of them,
* map them into a contiguous KVA region and populate them
* in a single dmu_read() call.
*/
int
mappedread_sf(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = zp->z_zfsvfs->z_os;
struct sf_buf *sf;
vm_object_t obj;
vm_page_t pp;
int64_t start;
caddr_t va;
int len = nbytes;
int error = 0;
ASSERT3U(zfs_uio_segflg(uio), ==, UIO_NOCOPY);
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
ASSERT0(zfs_uio_offset(uio) & PAGEOFFSET);
zfs_vmobject_wlock_12(obj);
for (start = zfs_uio_offset(uio); len > 0; start += PAGESIZE) {
int bytes = MIN(PAGESIZE, len);
pp = vm_page_grab_unlocked(obj, OFF_TO_IDX(start),
VM_ALLOC_SBUSY | VM_ALLOC_NORMAL | VM_ALLOC_IGN_SBUSY);
if (vm_page_none_valid(pp)) {
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = dmu_read(os, zp->z_id, start, bytes, va,
DMU_READ_PREFETCH);
if (bytes != PAGESIZE && error == 0)
bzero(va + bytes, PAGESIZE - bytes);
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
#if __FreeBSD_version >= 1300081
if (error == 0) {
vm_page_valid(pp);
vm_page_activate(pp);
vm_page_do_sunbusy(pp);
} else {
zfs_vmobject_wlock(obj);
if (!vm_page_wired(pp) && pp->valid == 0 &&
vm_page_busy_tryupgrade(pp))
vm_page_free(pp);
else
vm_page_sunbusy(pp);
zfs_vmobject_wunlock(obj);
}
#else
vm_page_do_sunbusy(pp);
vm_page_lock(pp);
if (error) {
if (pp->wire_count == 0 && pp->valid == 0 &&
!vm_page_busied(pp))
vm_page_free(pp);
} else {
pp->valid = VM_PAGE_BITS_ALL;
vm_page_activate(pp);
}
vm_page_unlock(pp);
#endif
} else {
ASSERT3U(pp->valid, ==, VM_PAGE_BITS_ALL);
vm_page_do_sunbusy(pp);
}
if (error)
break;
zfs_uio_advance(uio, bytes);
len -= bytes;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Read: We "read" preferentially from memory mapped pages,
* otherwise we fall back to the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
int
mappedread(znode_t *zp, int nbytes, zfs_uio_t *uio)
{
vnode_t *vp = ZTOV(zp);
vm_object_t obj;
int64_t start;
int len = nbytes;
int off;
int error = 0;
ASSERT3P(vp->v_mount, !=, NULL);
obj = vp->v_object;
ASSERT3P(obj, !=, NULL);
start = zfs_uio_offset(uio);
off = start & PAGEOFFSET;
zfs_vmobject_wlock_12(obj);
for (start &= PAGEMASK; len > 0; start += PAGESIZE) {
vm_page_t pp;
uint64_t bytes = MIN(PAGESIZE - off, len);
if ((pp = page_hold(vp, start))) {
struct sf_buf *sf;
caddr_t va;
zfs_vmobject_wunlock_12(obj);
va = zfs_map_page(pp, &sf);
error = vn_io_fault_uiomove(va + off, bytes,
GET_UIO_STRUCT(uio));
zfs_unmap_page(sf);
zfs_vmobject_wlock_12(obj);
page_unhold(pp);
} else {
zfs_vmobject_wunlock_12(obj);
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
zfs_vmobject_wlock_12(obj);
}
len -= bytes;
off = 0;
if (error)
break;
}
zfs_vmobject_wunlock_12(obj);
return (error);
}
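/*
 * Write a kernel buffer to the file at the given offset using vn_rdwr().
 * The write is done synchronously (IO_SYNC) with kernel credentials; the
 * residual byte count is returned via *presid when provided, otherwise a
 * short write is reported as EIO.
 */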
int
zfs_write_simple(znode_t *zp, const void *data, size_t len,
loff_t pos, size_t *presid)
{
int error = 0;
ssize_t resid;
error = vn_rdwr(UIO_WRITE, ZTOV(zp), __DECONST(void *, data), len, pos,
UIO_SYSSPACE, IO_SYNC, kcred, NOCRED, &resid, curthread);
if (error) {
return (SET_ERROR(error));
} else if (presid == NULL) {
if (resid != 0) {
error = SET_ERROR(EIO);
}
} else {
*presid = resid;
}
return (error);
}
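/*
 * Release the vnode backing a znode asynchronously on the pool's zrele
 * taskq rather than in the caller's context.
 */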
void
zfs_zrele_async(znode_t *zp)
{
vnode_t *vp = ZTOV(zp);
objset_t *os = ITOZSB(vp)->z_os;
VN_RELE_ASYNC(vp, dsl_pool_zrele_taskq(dmu_objset_pool(os)));
}
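/*
 * vn_vget_ino_gen() callback used for ".." lookups: the target vnode is
 * passed via arg and only needs to be locked; it is released on failure.
 */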
static int
zfs_dd_callback(struct mount *mp, void *arg, int lkflags, struct vnode **vpp)
{
int error;
*vpp = arg;
error = vn_lock(*vpp, lkflags);
if (error != 0)
vrele(*vpp);
return (error);
}
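/*
 * Acquire the vnode lock for a lookup result, handling the "." and ".."
 * special cases: "." only needs a lock upgrade/downgrade on dvp, while
 * ".." requires the unlock/relock dance performed by vn_vget_ino_gen().
 */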
static int
zfs_lookup_lock(vnode_t *dvp, vnode_t *vp, const char *name, int lkflags)
{
znode_t *zdp = VTOZ(dvp);
zfsvfs_t *zfsvfs __unused = zdp->z_zfsvfs;
int error;
int ltype;
if (zfsvfs->z_replay == B_FALSE)
ASSERT_VOP_LOCKED(dvp, __func__);
if (name[0] == 0 || (name[0] == '.' && name[1] == 0)) {
ASSERT3P(dvp, ==, vp);
vref(dvp);
ltype = lkflags & LK_TYPE_MASK;
if (ltype != VOP_ISLOCKED(dvp)) {
if (ltype == LK_EXCLUSIVE)
vn_lock(dvp, LK_UPGRADE | LK_RETRY);
else /* if (ltype == LK_SHARED) */
vn_lock(dvp, LK_DOWNGRADE | LK_RETRY);
/*
* Relock for the "." case could leave us with
* reclaimed vnode.
*/
if (VN_IS_DOOMED(dvp)) {
vrele(dvp);
return (SET_ERROR(ENOENT));
}
}
return (0);
} else if (name[0] == '.' && name[1] == '.' && name[2] == 0) {
/*
* Note that in this case, dvp is the child vnode, and we
* are looking up the parent vnode - exactly reverse from
* normal operation. Unlocking dvp requires some rather
* tricky unlock/relock dance to prevent mp from being freed;
* use vn_vget_ino_gen() which takes care of all that.
*
* XXX Note that there is a time window when both vnodes are
* unlocked. It is possible, although highly unlikely, that
* during that window the parent-child relationship between
* the vnodes may change, for example, get reversed.
* In that case we would have a wrong lock order for the vnodes.
* All other filesystems seem to ignore this problem, so we
* do the same here.
* A potential solution could be implemented as follows:
* - using LK_NOWAIT when locking the second vnode and retrying
* if necessary
* - checking that the parent-child relationship still holds
* after locking both vnodes and retrying if it doesn't
*/
error = vn_vget_ino_gen(dvp, zfs_dd_callback, vp, lkflags, &vp);
return (error);
} else {
error = vn_lock(vp, lkflags);
if (error != 0)
vrele(vp);
return (error);
}
}
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held vnode reference for it.
*
* IN: dvp - vnode of directory to search.
* nm - name of entry to lookup.
* pnp - full pathname to lookup [UNUSED].
* flags - LOOKUP_XATTR set if looking for an attribute.
* rdir - root directory vnode [UNUSED].
* cr - credentials of caller.
* ct - caller context
*
* OUT: vpp - vnode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
/* ARGSUSED */
static int
zfs_lookup(vnode_t *dvp, const char *nm, vnode_t **vpp,
struct componentname *cnp, int nameiop, cred_t *cr, kthread_t *td,
int flags, boolean_t cached)
{
znode_t *zdp = VTOZ(dvp);
znode_t *zp;
zfsvfs_t *zfsvfs = zdp->z_zfsvfs;
#if __FreeBSD_version > 1300124
seqc_t dvp_seqc;
#endif
int error = 0;
/*
* Fast path lookup, however we must skip DNLC lookup
* for case folding or normalizing lookups because the
* DNLC code only stores the passed in name. This means
* creating 'a' and removing 'A' on a case insensitive
* file system would work, but DNLC still thinks 'a'
* exists and won't let you create it again on the next
* pass through the fast path.
*/
if (!(flags & LOOKUP_XATTR)) {
if (dvp->v_type != VDIR) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
}
DTRACE_PROBE2(zfs__fastpath__lookup__miss, vnode_t *, dvp,
const char *, nm);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zdp);
#if __FreeBSD_version > 1300124
dvp_seqc = vn_seqc_read_notmodify(dvp);
#endif
*vpp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* If the xattr property is off, refuse the lookup request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOPNOTSUPP));
}
/*
* We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(VTOZ(dvp), &zp, cr, flags))) {
ZFS_EXIT(zfsvfs);
return (error);
}
*vpp = ZTOV(zp);
/*
* Do we have permission to get into attribute directory?
*/
error = zfs_zaccess(zp, ACE_EXECUTE, 0, B_FALSE, cr);
if (error) {
vrele(ZTOV(zp));
}
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Check accessibility of directory if we're not coming in via
* VOP_CACHEDLOOKUP.
*/
if (!cached) {
#ifdef NOEXECCHECK
if ((cnp->cn_flags & NOEXECCHECK) != 0) {
cnp->cn_flags &= ~NOEXECCHECK;
} else
#endif
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
/*
* First handle the special cases.
*/
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* If we are a snapshot mounted under .zfs, return
* the vp for the snapshot directory.
*/
if (zdp->z_id == zfsvfs->z_root && zfsvfs->z_parent != zfsvfs) {
struct componentname cn;
vnode_t *zfsctl_vp;
int ltype;
ZFS_EXIT(zfsvfs);
ltype = VOP_ISLOCKED(dvp);
VOP_UNLOCK1(dvp);
error = zfsctl_root(zfsvfs->z_parent, LK_SHARED,
&zfsctl_vp);
if (error == 0) {
cn.cn_nameptr = "snapshot";
cn.cn_namelen = strlen(cn.cn_nameptr);
cn.cn_nameiop = cnp->cn_nameiop;
cn.cn_flags = cnp->cn_flags & ~ISDOTDOT;
cn.cn_lkflags = cnp->cn_lkflags;
error = VOP_LOOKUP(zfsctl_vp, vpp, &cn);
vput(zfsctl_vp);
}
vn_lock(dvp, ltype | LK_RETRY);
return (error);
}
}
if (zfs_has_ctldir(zdp) && strcmp(nm, ZFS_CTLDIR_NAME) == 0) {
ZFS_EXIT(zfsvfs);
if ((cnp->cn_flags & ISLASTCN) != 0 && nameiop != LOOKUP)
return (SET_ERROR(ENOTSUP));
error = zfsctl_root(zfsvfs, cnp->cn_lkflags, vpp);
return (error);
}
/*
* The loop retries the lookup if the parent-child relationship
* changes during the dot-dot locking complexities.
*/
for (;;) {
uint64_t parent;
error = zfs_dirlook(zdp, nm, &zp);
if (error == 0)
*vpp = ZTOV(zp);
ZFS_EXIT(zfsvfs);
if (error != 0)
break;
error = zfs_lookup_lock(dvp, *vpp, nm, cnp->cn_lkflags);
if (error != 0) {
/*
* If we've got a locking error, then the vnode
* got reclaimed because of a force unmount.
* We never enter doomed vnodes into the name cache.
*/
*vpp = NULL;
return (error);
}
if ((cnp->cn_flags & ISDOTDOT) == 0)
break;
ZFS_ENTER(zfsvfs);
if (zdp->z_sa_hdl == NULL) {
error = SET_ERROR(EIO);
} else {
error = sa_lookup(zdp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent));
}
if (error != 0) {
ZFS_EXIT(zfsvfs);
vput(ZTOV(zp));
break;
}
if (zp->z_id == parent) {
ZFS_EXIT(zfsvfs);
break;
}
vput(ZTOV(zp));
}
if (error != 0)
*vpp = NULL;
/* Translate errors and add SAVENAME when needed. */
if (cnp->cn_flags & ISLASTCN) {
switch (nameiop) {
case CREATE:
case RENAME:
if (error == ENOENT) {
error = EJUSTRETURN;
cnp->cn_flags |= SAVENAME;
break;
}
- /* FALLTHROUGH */
+ fallthrough;
case DELETE:
if (error == 0)
cnp->cn_flags |= SAVENAME;
break;
}
}
#if __FreeBSD_version > 1300124
if ((cnp->cn_flags & ISDOTDOT) != 0) {
/*
* FIXME: zfs_lookup_lock relocks vnodes and does nothing to
* handle races. In particular different callers may end up
* with different vnodes and will try to add conflicting
* entries to the namecache.
*
* While finding a different result may be acceptable in the face
* of concurrent modification, adding conflicting entries
* trips over an assert in the namecache.
*
* Ultimately let an entry through once everything settles.
*/
if (!vn_seqc_consistent(dvp, dvp_seqc)) {
cnp->cn_flags &= ~MAKEENTRY;
}
}
#endif
/* Insert name into cache (as non-existent) if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == ENOENT && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(dvp, NULL, cnp);
/* Insert name into cache if appropriate. */
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay &&
error == 0 && (cnp->cn_flags & MAKEENTRY)) {
if (!(cnp->cn_flags & ISLASTCN) ||
(nameiop != DELETE && nameiop != RENAME)) {
cache_enter(dvp, *vpp, cnp);
}
}
return (error);
}
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the vp of the created or trunc'd file.
*
* IN: dvp - vnode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - large file flag [UNUSED].
* ct - caller context
* vsecp - ACL to be set
*
* OUT: vpp - vnode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated if new entry created
* vp - ctime|mtime always, atime if new
*/
/* ARGSUSED */
int
zfs_create(znode_t *dzp, const char *name, vattr_t *vap, int excl, int mode,
znode_t **zpp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
objset_t *os;
dmu_tx_t *tx;
int error;
ksid_t *ksid;
uid_t uid;
gid_t gid = crgetgid(cr);
uint64_t projid = ZFS_DEFAULT_PROJID;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype;
#ifdef DEBUG_VFS_LOCKS
vnode_t *dvp = ZTOV(dzp);
#endif
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
ksid = crgetsid(cr, KSID_OWNER);
if (ksid)
uid = ksid_getid(ksid);
else
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || (vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
*zpp = NULL;
if ((vap->va_mode & S_ISVTX) && secpolicy_vnode_stky_modify(cr))
vap->va_mode &= ~S_ISVTX;
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
ZFS_EXIT(zfsvfs);
return (error);
}
ASSERT3P(zp, ==, NULL);
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) &&
(vap->va_type != VREG)) {
error = SET_ERROR(EINVAL);
goto out;
}
if ((error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
getnewvnode_reserve_();
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
ZFS_EXIT(zfsvfs);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
out:
VNCHECKREF(dvp);
if (error == 0) {
*zpp = zp;
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dvp - vnode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime
* vp - ctime (if nlink > 0)
*/
/*ARGSUSED*/
static int
zfs_remove_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp;
znode_t *xzp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t xattr_obj;
uint64_t obj = 0;
dmu_tx_t *tx;
boolean_t unlinked;
uint64_t txtype;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zp = VTOZ(vp);
ZFS_VERIFY_ZP(zp);
zilog = zfsvfs->z_log;
xattr_obj = 0;
xzp = NULL;
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (vp->v_type == VDIR) {
error = SET_ERROR(EPERM);
goto out;
}
vnevent_remove(vp, dvp, name, ct);
obj = zp->z_id;
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
}
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the vnode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (xzp) {
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
zfs_unlinked_add(zp, tx);
vp->v_vflag |= VV_NOSYNC;
}
/* XXX check changes to linux vnops */
txtype = TX_REMOVE;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj, unlinked);
dmu_tx_commit(tx);
out:
if (xzp)
vrele(ZTOV(xzp));
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
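/*
 * Helper for the znode-based entry points below: build a componentname
 * for the given name and resolve it to a locked vnode, going through the
 * name cache when it is enabled and we are not replaying the ZIL.
 */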
static int
zfs_lookup_internal(znode_t *dzp, const char *name, vnode_t **vpp,
struct componentname *cnp, int nameiop)
{
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
int error;
cnp->cn_nameptr = __DECONST(char *, name);
cnp->cn_namelen = strlen(name);
cnp->cn_nameiop = nameiop;
cnp->cn_flags = ISLASTCN | SAVENAME;
cnp->cn_lkflags = LK_EXCLUSIVE | LK_RETRY;
cnp->cn_cred = kcred;
cnp->cn_thread = curthread;
if (zfsvfs->z_use_namecache && !zfsvfs->z_replay) {
struct vop_lookup_args a;
a.a_gen.a_desc = &vop_lookup_desc;
a.a_dvp = ZTOV(dzp);
a.a_vpp = vpp;
a.a_cnp = cnp;
error = vfs_cache_lookup(&a);
} else {
error = zfs_lookup(ZTOV(dzp), name, vpp, cnp, nameiop, kcred,
curthread, 0, B_FALSE);
}
#ifdef ZFS_DEBUG
if (error) {
printf("got error %d on name %s on op %d\n", error, name,
nameiop);
kdb_backtrace();
}
#endif
return (error);
}
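/*
 * Remove the named entry from the directory: look the name up with DELETE
 * intent and hand the locked vnode to zfs_remove_().
 */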
int
zfs_remove(znode_t *dzp, const char *name, cred_t *cr, int flags)
{
vnode_t *vp;
int error;
struct componentname cn;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_remove_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Create a new directory and insert it into dvp using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dvp - vnode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
* vsecp - ACL to be set
*
* OUT: vpp - vnode of created directory.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
* vp - ctime|mtime|atime updated
*/
/*ARGSUSED*/
int
zfs_mkdir(znode_t *dzp, const char *dirname, vattr_t *vap, znode_t **zpp,
cred_t *cr, int flags, vsecattr_t *vsecp)
{
znode_t *zp;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t txtype;
dmu_tx_t *tx;
int error;
ksid_t *ksid;
uid_t uid;
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
ASSERT3U(vap->va_type, ==, VDIR);
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
ksid = crgetsid(cr, KSID_OWNER);
if (ksid)
uid = ksid_getid(ksid);
else
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
((vap->va_mask & AT_XVATTR) ||
IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & AT_XVATTR) {
if ((error = secpolicy_xvattr(ZTOV(dzp), (xvattr_t *)vap,
crgetuid(cr), cr, vap->va_type)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
NULL, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
* EACCES instead of EEXIST which can cause some applications
* to fail.
*/
*zpp = NULL;
if ((error = zfs_dirent_lookup(dzp, dirname, &zp, ZNEW))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
ASSERT3P(zp, ==, NULL);
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/*
* Now put new name in parent dir.
*/
(void) zfs_link_create(dzp, dirname, zp, tx, ZNEW);
*zpp = zp;
txtype = zfs_log_create_txtype(Z_DIR, NULL, vap);
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, NULL,
acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (0);
}
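/*
 * Compatibility shim: older FreeBSD versions do not provide
 * cache_vop_rmdir(), so emulate it by purging the name cache entries for
 * both the parent and the removed directory.
 */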
#if __FreeBSD_version < 1300124
static void
cache_vop_rmdir(struct vnode *dvp, struct vnode *vp)
{
cache_purge(dvp);
cache_purge(vp);
}
#endif
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dvp - vnode of directory to remove from.
* name - name of directory to be removed.
* cwd - vnode of current working directory.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
*/
/*ARGSUSED*/
static int
zfs_rmdir_(vnode_t *dvp, vnode_t *vp, const char *name, cred_t *cr)
{
znode_t *dzp = VTOZ(dvp);
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
ZFS_VERIFY_ZP(zp);
zilog = zfsvfs->z_log;
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
if (vp->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto out;
}
vnevent_rmdir(vp, dvp, name, ct);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
error = zfs_link_destroy(dzp, name, zp, tx, ZEXISTS, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
zfs_log_remove(zilog, tx, txtype, dzp, name,
ZFS_NO_OBJECT, B_FALSE);
}
dmu_tx_commit(tx);
cache_vop_rmdir(dvp, vp);
out:
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
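/*
 * Remove the named subdirectory: look the name up with DELETE intent and
 * hand the locked vnode to zfs_rmdir_().
 */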
int
zfs_rmdir(znode_t *dzp, const char *name, znode_t *cwd, cred_t *cr, int flags)
{
struct componentname cn;
vnode_t *vp;
int error;
if ((error = zfs_lookup_internal(dzp, name, &vp, &cn, DELETE)))
return (error);
error = zfs_rmdir_(ZTOV(dzp), vp, name, cr);
vput(vp);
return (error);
}
/*
* Read as many directory entries as will fit into the provided
* buffer from the given directory cursor position (specified in
* the uio structure).
*
* IN: vp - vnode of directory to read.
* uio - structure supplying read location, range info,
* and return buffer.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* OUT: uio - updated offset and range, buffer filled.
* eofp - set to true if end-of-file detected.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*
* Note that the low 4 bits of the cookie returned by zap is always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
/* ARGSUSED */
static int
zfs_readdir(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, int *eofp,
int *ncookies, ulong_t **cookies)
{
znode_t *zp = VTOZ(vp);
iovec_t *iovp;
edirent_t *eodp;
dirent64_t *odp;
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os;
caddr_t outbuf;
size_t bufsize;
zap_cursor_t zc;
zap_attribute_t zap;
uint_t bytes_wanted;
uint64_t offset; /* must be unsigned; checks for < 1 */
uint64_t parent;
int local_eof;
int outcount;
int error;
uint8_t prefetch;
boolean_t check_sysattrs;
uint8_t type;
int ncooks;
ulong_t *cooks = NULL;
int flags = 0;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* If we are not given an eof variable,
* use a local one.
*/
if (eofp == NULL)
eofp = &local_eof;
/*
* Check for valid iov_len.
*/
if (GET_UIO_STRUCT(uio)->uio_iov->iov_len <= 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Quit if directory has been removed (posix)
*/
if ((*eofp = zp->z_unlinked) != 0) {
ZFS_EXIT(zfsvfs);
return (0);
}
error = 0;
os = zfsvfs->z_os;
offset = zfs_uio_offset(uio);
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
/*
* Get space to change directory entries into fs independent format.
*/
iovp = GET_UIO_STRUCT(uio)->uio_iov;
bytes_wanted = iovp->iov_len;
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1) {
bufsize = bytes_wanted;
outbuf = kmem_alloc(bufsize, KM_SLEEP);
odp = (struct dirent64 *)outbuf;
} else {
bufsize = bytes_wanted;
outbuf = NULL;
odp = (struct dirent64 *)iovp->iov_base;
}
eodp = (struct edirent *)odp;
if (ncookies != NULL) {
/*
* Minimum entry size is dirent size and 1 byte for a file name.
*/
ncooks = zfs_uio_resid(uio) / (sizeof (struct dirent) -
sizeof (((struct dirent *)NULL)->d_name) + 1);
cooks = malloc(ncooks * sizeof (ulong_t), M_TEMP, M_WAITOK);
*cookies = cooks;
*ncookies = ncooks;
}
/*
* If this VFS supports the system attribute view interface; and
* we're looking at an extended attribute directory; and we care
* about normalization conflicts on this vfs; then we must check
* for normalization conflicts with the sysattr name space.
*/
#ifdef TODO
check_sysattrs = vfs_has_feature(vp->v_vfsp, VFSFT_SYSATTR_VIEWS) &&
(vp->v_flag & V_XATTRDIR) && zfsvfs->z_norm &&
(flags & V_RDDIR_ENTFLAGS);
#else
check_sysattrs = 0;
#endif
/*
* Transform to file-system independent format
*/
outcount = 0;
while (outcount < bytes_wanted) {
ino64_t objnum;
ushort_t reclen;
off64_t *next = NULL;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if ((*eofp = (error == ENOENT)) != 0)
break;
else
goto update;
}
if (zap.za_integer_length != 8 ||
zap.za_num_integers != 1) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
/*
* MacOS X can extract the object type here such as:
* uint8_t type = ZFS_DIRENT_TYPE(zap.za_first_integer);
*/
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
if (check_sysattrs && !zap.za_normalization_conflict) {
#ifdef TODO
zap.za_normalization_conflict =
xattr_sysattr_casechk(zap.za_name);
#else
panic("%s:%u: TODO", __func__, __LINE__);
#endif
}
}
if (flags & V_RDDIR_ACCFILTER) {
/*
* If we have no access at all, don't include
* this entry in the returned information
*/
znode_t *ezp;
if (zfs_zget(zp->z_zfsvfs, objnum, &ezp) != 0)
goto skip_entry;
if (!zfs_has_access(ezp, cr)) {
vrele(ZTOV(ezp));
goto skip_entry;
}
vrele(ZTOV(ezp));
}
if (flags & V_RDDIR_ENTFLAGS)
reclen = EDIRENT_RECLEN(strlen(zap.za_name));
else
reclen = DIRENT64_RECLEN(strlen(zap.za_name));
/*
* Will this entry fit in the buffer?
*/
if (outcount + reclen > bufsize) {
/*
* Did we manage to fit anything in the buffer?
*/
if (!outcount) {
error = SET_ERROR(EINVAL);
goto update;
}
break;
}
if (flags & V_RDDIR_ENTFLAGS) {
/*
* Add extended flag entry:
*/
eodp->ed_ino = objnum;
eodp->ed_reclen = reclen;
/* NOTE: ed_off is the offset for the *next* entry */
next = &(eodp->ed_off);
eodp->ed_eflags = zap.za_normalization_conflict ?
ED_CASE_CONFLICT : 0;
(void) strncpy(eodp->ed_name, zap.za_name,
EDIRENT_NAMELEN(reclen));
eodp = (edirent_t *)((intptr_t)eodp + reclen);
} else {
/*
* Add normal entry:
*/
odp->d_ino = objnum;
odp->d_reclen = reclen;
odp->d_namlen = strlen(zap.za_name);
/* NOTE: d_off is the offset for the *next* entry. */
next = &odp->d_off;
strlcpy(odp->d_name, zap.za_name, odp->d_namlen + 1);
odp->d_type = type;
dirent_terminate(odp);
odp = (dirent64_t *)((intptr_t)odp + reclen);
}
outcount += reclen;
ASSERT3S(outcount, <=, bufsize);
/* Prefetch znode */
if (prefetch)
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
skip_entry:
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
/* Fill the offset right after advancing the cursor. */
if (next != NULL)
*next = offset;
if (cooks != NULL) {
*cooks++ = offset;
ncooks--;
KASSERT(ncooks >= 0, ("ncookies=%d", ncooks));
}
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
/* Subtract unused cookies */
if (ncookies != NULL)
*ncookies -= ncooks;
if (zfs_uio_segflg(uio) == UIO_SYSSPACE && zfs_uio_iovcnt(uio) == 1) {
iovp->iov_base += outcount;
iovp->iov_len -= outcount;
zfs_uio_resid(uio) -= outcount;
} else if ((error =
zfs_uiomove(outbuf, (long)outcount, UIO_READ, uio))) {
/*
* Reset the pointer.
*/
offset = zfs_uio_offset(uio);
}
update:
zap_cursor_fini(&zc);
if (zfs_uio_segflg(uio) != UIO_SYSSPACE || zfs_uio_iovcnt(uio) != 1)
kmem_free(outbuf, bufsize);
if (error == ENOENT)
error = 0;
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
zfs_uio_setoffset(uio, offset);
ZFS_EXIT(zfsvfs);
if (error != 0 && cookies != NULL) {
free(*cookies, M_TEMP);
*cookies = NULL;
*ncookies = 0;
}
return (error);
}
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
*
* IN: vp - vnode of file.
* vap - va_mask identifies requested attributes.
* If AT_XVATTR set, then optional attrs are requested
* flags - ATTR_NOACLCHECK (CIFS server context)
* cr - credentials of caller.
*
* OUT: vap - attribute values.
*
* RETURN: 0 (always succeeds).
*/
/* ARGSUSED */
static int
zfs_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error = 0;
uint32_t blksize;
u_longlong_t nblocks;
uint64_t mtime[2], ctime[2], crtime[2], rdev;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap = NULL;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
sa_bulk_attr_t bulk[4];
int count = 0;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &crtime, 16);
if (vp->v_type == VBLK || vp->v_type == VCHR)
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_RDEV(zfsvfs), NULL,
&rdev, 8);
if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
* Also, if we are the owner don't bother, since owner should
* always be allowed to read basic attributes of file.
*/
if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
(vap->va_uid != crgetuid(cr))) {
if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
skipaclchk, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
/*
* Return all attributes. It's cheaper to provide the answer
* than to determine whether we were asked the question.
*/
vap->va_type = IFTOVT(zp->z_mode);
vap->va_mode = zp->z_mode & ~S_IFMT;
vn_fsid(vp, vap);
vap->va_nodeid = zp->z_id;
vap->va_nlink = zp->z_links;
if ((vp->v_flag & VROOT) && zfs_show_ctldir(zp) &&
zp->z_links < ZFS_LINK_MAX)
vap->va_nlink++;
vap->va_size = zp->z_size;
if (vp->v_type == VBLK || vp->v_type == VCHR)
vap->va_rdev = zfs_cmpldev(rdev);
vap->va_seq = zp->z_seq;
vap->va_flags = 0; /* FreeBSD: Reset chflags(2) flags. */
vap->va_filerev = zp->z_seq;
/*
* Add in any requested optional attributes and the create time.
* Also set the corresponding bits in the returned attribute bitmap.
*/
if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
xoap->xoa_archive =
((zp->z_pflags & ZFS_ARCHIVE) != 0);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
xoap->xoa_readonly =
((zp->z_pflags & ZFS_READONLY) != 0);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
xoap->xoa_system =
((zp->z_pflags & ZFS_SYSTEM) != 0);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
xoap->xoa_hidden =
((zp->z_pflags & ZFS_HIDDEN) != 0);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
xoap->xoa_nounlink =
((zp->z_pflags & ZFS_NOUNLINK) != 0);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
xoap->xoa_immutable =
((zp->z_pflags & ZFS_IMMUTABLE) != 0);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
xoap->xoa_appendonly =
((zp->z_pflags & ZFS_APPENDONLY) != 0);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
xoap->xoa_nodump =
((zp->z_pflags & ZFS_NODUMP) != 0);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
xoap->xoa_opaque =
((zp->z_pflags & ZFS_OPAQUE) != 0);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
xoap->xoa_av_quarantined =
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
xoap->xoa_av_modified =
((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
vp->v_type == VREG) {
zfs_sa_get_scanstamp(zp, xvap);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
xoap->xoa_generation = zp->z_gen;
XVA_SET_RTN(xvap, XAT_GEN);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
xoap->xoa_offline =
((zp->z_pflags & ZFS_OFFLINE) != 0);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
xoap->xoa_sparse =
((zp->z_pflags & ZFS_SPARSE) != 0);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
xoap->xoa_projinherit =
((zp->z_pflags & ZFS_PROJINHERIT) != 0);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
xoap->xoa_projid = zp->z_projid;
XVA_SET_RTN(xvap, XAT_PROJID);
}
}
ZFS_TIME_DECODE(&vap->va_atime, zp->z_atime);
ZFS_TIME_DECODE(&vap->va_mtime, mtime);
ZFS_TIME_DECODE(&vap->va_ctime, ctime);
ZFS_TIME_DECODE(&vap->va_birthtime, crtime);
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
vap->va_blksize = blksize;
vap->va_bytes = nblocks << 9; /* nblocks * 512 */
if (zp->z_blksz == 0) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
vap->va_blksize = zfsvfs->z_max_blksz;
}
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: zp - znode of file to be modified.
* vap - new attribute values.
* If AT_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
* ct - caller context
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - ctime updated, mtime updated if size changed.
*/
/* ARGSUSED */
int
zfs_setattr(znode_t *zp, vattr_t *vap, int flags, cred_t *cr)
{
vnode_t *vp = ZTOV(zp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os = zfsvfs->z_os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
uint64_t saved_mode;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
sa_bulk_attr_t bulk[7], xattr_bulk[7];
int count = 0, xattr_count = 0;
if (mask == 0)
return (0);
if (mask & AT_NOSET)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
zilog = zfsvfs->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & AT_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & AT_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & AT_XVATTR))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (mask & AT_SIZE && vp->v_type == VDIR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EISDIR));
}
if (mask & AT_SIZE && vp->v_type != VREG && vp->v_type != VFIFO) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* If this is an xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
xva_init(&tmpxvattr);
/*
* Immutable files can only alter immutable bit and atime
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (AT_SIZE|AT_UID|AT_GID|AT_MTIME|AT_MODE)) ||
((mask & AT_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/*
* Note: ZFS_READONLY is handled in zfs_zaccess_common.
*/
/*
* Verify that the timestamps don't overflow 32 bits.
* ZFS can handle large timestamps, but 32-bit syscalls can't
* handle times greater than 2039. This check should be removed
* once large timestamps are fully supported.
*/
if (mask & (AT_ATIME | AT_MTIME)) {
if (((mask & AT_ATIME) && TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & AT_MTIME) && TIMESPEC_OVERFLOW(&vap->va_mtime))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOVERFLOW));
}
}
if (xoap != NULL && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME) &&
TIMESPEC_OVERFLOW(&vap->va_birthtime)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOVERFLOW));
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOPNOTSUPP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(zp->z_mode) && !S_ISDIR(zp->z_mode)))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EOPNOTSUPP));
}
}
attrzp = NULL;
aclp = NULL;
if (zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
/*
* First validate permissions
*/
if (mask & AT_SIZE) {
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err) {
ZFS_EXIT(zfsvfs);
return (err);
}
}
if (mask & (AT_ATIME|AT_MTIME) ||
((mask & AT_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr);
}
if (mask & (AT_UID|AT_GID)) {
int idmask = (mask & (AT_UID|AT_GID));
int take_owner;
int take_group;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & AT_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
take_owner = (mask & AT_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & AT_GID) &&
zfs_groupmember(zfsvfs, vap->va_gid, cr);
/*
* If both AT_UID and AT_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr()
*
*/
if (((idmask == (AT_UID|AT_GID)) && take_owner && take_group) ||
((idmask == AT_UID) && take_owner) ||
((idmask == AT_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
secpolicy_setid_clear(vap, vp, cr);
trim_mask = (mask & (AT_UID|AT_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & AT_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
* The bits will be restored prior to actually setting
* the attributes, so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(&tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(&tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(&tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(&tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((vp->v_type != VREG &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(&tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
if (mask & AT_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
err = secpolicy_setid_setsticky_clear(vp, vap,
&oldva, cr);
if (err) {
ZFS_EXIT(zfsvfs);
return (err);
}
trim_mask |= AT_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set then take ownership
* has been granted or write_acl is present and user
* has the ability to modify mode. In that case remove
* UID|GID and or MODE from mask so that
* secpolicy_vnode_setattr() doesn't revoke it.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
if (trim_mask & AT_MODE) {
/*
* Save the mode, as secpolicy_vnode_setattr()
* will overwrite it with ova.va_mode.
*/
saved_mode = vap->va_mode;
}
}
err = secpolicy_vnode_setattr(cr, vp, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err) {
ZFS_EXIT(zfsvfs);
return (err);
}
if (trim_mask) {
vap->va_mask |= saved_mask;
if (trim_mask & AT_MODE) {
/*
* Recover the mode after
* secpolicy_vnode_setattr().
*/
vap->va_mode = saved_mode;
}
}
}
/*
* secpolicy_vnode_setattr, or take ownership may have
* changed va_mask
*/
mask = vap->va_mask;
if ((mask & (AT_UID | AT_GID)) || projid != ZFS_INVALID_PROJID) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(zp->z_zfsvfs, xattr_obj, &attrzp);
if (err == 0) {
err = vn_lock(ZTOV(attrzp), LK_EXCLUSIVE);
if (err != 0)
vrele(ZTOV(attrzp));
}
if (err)
goto out2;
}
if (mask & AT_UID) {
new_uid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_uid != zp->z_uid &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_uid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & AT_GID) {
new_gid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &fuidp);
if (new_gid != zp->z_gid &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_gid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
vput(ZTOV(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
tx = dmu_tx_create(os);
if (mask & AT_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
if (zp->z_zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
!(zp->z_pflags & ZFS_ACL_TRIVIAL)) {
err = SET_ERROR(EPERM);
goto out;
}
if ((err = zfs_acl_chmod_setattr(zp, &aclp, new_mode)))
goto out;
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
if (((mask & AT_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
* For an existing object upgraded from an older system, the
* on-disk layout has no slot for the project ID attribute.
* But the quota accounting logic needs to access related slots by
* offset directly, so we need to adjust the old object's layout
* to place the project ID at a unified and fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&zp->z_acl_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_enter(&attrzp->z_acl_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (AT_UID|AT_GID)) {
if (mask & AT_UID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
zp->z_uid = new_uid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
attrzp->z_uid = new_uid;
}
}
if (mask & AT_GID) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
zp->z_gid = new_gid;
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
attrzp->z_gid = new_gid;
}
}
if (!(mask & AT_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT0(err);
if (attrzp) {
vn_seqc_write_begin(ZTOV(attrzp));
err = zfs_acl_chown_setattr(attrzp);
vn_seqc_write_end(ZTOV(attrzp));
ASSERT0(err);
}
}
if (mask & AT_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
if (mask & AT_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, zp->z_atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&zp->z_atime, sizeof (zp->z_atime));
}
if (mask & AT_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
/* XXX - shouldn't this be done *before* the ATIME/MTIME checks? */
if (mask & AT_SIZE && !(mask & AT_MTIME)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
NULL, mtime, sizeof (mtime));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
} else if (mask != 0) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime, ctime);
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
zfs_tstamp_update_setup(attrzp, STATE_CHANGED,
mtime, ctime);
}
}
/*
* Do this after setting timestamps to prevent timestamp
* update from toggling bit
*/
if (xoap && (mask & AT_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
xoap->xoa_createtime = vap->va_birthtime;
/*
* restore trimmed off masks
* so that return masks can be set for caller.
*/
if (XVA_ISSET_REQ(&tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(&tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT3S(vp->v_type, ==, VREG);
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (AT_UID|AT_GID|AT_MODE))
mutex_exit(&attrzp->z_acl_lock);
}
out:
if (err == 0 && attrzp) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT0(err2);
}
if (attrzp)
vput(ZTOV(attrzp));
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
} else {
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (err);
}
/*
* We acquire all but fdvp locks using non-blocking acquisitions. If we
* fail to acquire any lock in the path we will drop all held locks,
* acquire the new lock in a blocking fashion, and then release it and
* restart the rename. This acquire/release step ensures that we do not
* spin on a lock waiting for release. On error release all vnode locks
* and decrement references the way tmpfs_rename() would do.
*/
static int
zfs_rename_relock(struct vnode *sdvp, struct vnode **svpp,
struct vnode *tdvp, struct vnode **tvpp,
const struct componentname *scnp, const struct componentname *tcnp)
{
zfsvfs_t *zfsvfs;
struct vnode *nvp, *svp, *tvp;
znode_t *sdzp, *tdzp, *szp, *tzp;
const char *snm = scnp->cn_nameptr;
const char *tnm = tcnp->cn_nameptr;
int error;
VOP_UNLOCK1(tdvp);
if (*tvpp != NULL && *tvpp != tdvp)
VOP_UNLOCK1(*tvpp);
relock:
error = vn_lock(sdvp, LK_EXCLUSIVE);
if (error)
goto out;
sdzp = VTOZ(sdvp);
error = vn_lock(tdvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
if (error != EBUSY)
goto out;
error = vn_lock(tdvp, LK_EXCLUSIVE);
if (error)
goto out;
VOP_UNLOCK1(tdvp);
goto relock;
}
tdzp = VTOZ(tdvp);
/*
* Before using sdzp and tdzp we must ensure that they are live.
* As a porting legacy from illumos we have two things to worry
* about. One is typical for FreeBSD: that the vnode has not been
* reclaimed (doomed). The other is that the znode is still live.
* The current code can invalidate the znode without acquiring the
* corresponding vnode lock if the object represented by the znode
* and vnode is no longer valid after a rollback or receive operation.
* z_teardown_lock hidden behind ZFS_ENTER and ZFS_EXIT is the lock
* that protects the znodes from the invalidation.
*/
zfsvfs = sdzp->z_zfsvfs;
ASSERT3P(zfsvfs, ==, tdzp->z_zfsvfs);
ZFS_ENTER(zfsvfs);
/*
* We cannot use ZFS_VERIFY_ZP() here because it could return
* directly, bypassing the cleanup code, in the case of an error.
*/
if (tdzp->z_sa_hdl == NULL || sdzp->z_sa_hdl == NULL) {
ZFS_EXIT(zfsvfs);
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
error = SET_ERROR(EIO);
goto out;
}
/*
* Re-resolve svp to be certain it still exists and fetch the
* correct vnode.
*/
error = zfs_dirent_lookup(sdzp, snm, &szp, ZEXISTS);
if (error != 0) {
/* Source entry invalid or not there. */
ZFS_EXIT(zfsvfs);
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
if ((scnp->cn_flags & ISDOTDOT) != 0 ||
(scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.'))
error = SET_ERROR(EINVAL);
goto out;
}
svp = ZTOV(szp);
/*
* Re-resolve tvp, if it disappeared we just carry on.
*/
error = zfs_dirent_lookup(tdzp, tnm, &tzp, 0);
if (error != 0) {
ZFS_EXIT(zfsvfs);
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
vrele(svp);
if ((tcnp->cn_flags & ISDOTDOT) != 0)
error = SET_ERROR(EINVAL);
goto out;
}
if (tzp != NULL)
tvp = ZTOV(tzp);
else
tvp = NULL;
/*
* At present the vnode locks must be acquired before z_teardown_lock,
* although it would be more logical to use the opposite order.
*/
ZFS_EXIT(zfsvfs);
/*
* Now try to acquire locks on svp and tvp.
*/
nvp = svp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
if (tvp != NULL)
vrele(tvp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
VOP_UNLOCK1(nvp);
/*
* Concurrent rename race.
* XXX ?
*/
if (nvp == tdvp) {
vrele(nvp);
error = SET_ERROR(EINVAL);
goto out;
}
vrele(*svpp);
*svpp = nvp;
goto relock;
}
vrele(*svpp);
*svpp = nvp;
if (*tvpp != NULL)
vrele(*tvpp);
*tvpp = NULL;
if (tvp != NULL) {
nvp = tvp;
error = vn_lock(nvp, LK_EXCLUSIVE | LK_NOWAIT);
if (error != 0) {
VOP_UNLOCK1(sdvp);
VOP_UNLOCK1(tdvp);
VOP_UNLOCK1(*svpp);
if (error != EBUSY) {
vrele(nvp);
goto out;
}
error = vn_lock(nvp, LK_EXCLUSIVE);
if (error != 0) {
vrele(nvp);
goto out;
}
vput(nvp);
goto relock;
}
*tvpp = nvp;
}
return (0);
out:
return (error);
}
/*
* Note that we must use VRELE_ASYNC in this function as it walks
* up the directory tree and vrele may need to acquire an exclusive
* lock if the last reference to a vnode is dropped.
*/
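/*
* Sanity check for the rename: the source znode (szp) must not be an
* ancestor of the target directory (tdzp), otherwise the rename would
* move a directory underneath itself.  Walk tdzp's parent chain via the
* SA_ZPL_PARENT attribute; finding szp on the way up yields EINVAL.
*/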
static int
zfs_rename_check(znode_t *szp, znode_t *sdzp, znode_t *tdzp)
{
zfsvfs_t *zfsvfs;
znode_t *zp, *zp1;
uint64_t parent;
int error;
zfsvfs = tdzp->z_zfsvfs;
if (tdzp == szp)
return (SET_ERROR(EINVAL));
if (tdzp == sdzp)
return (0);
if (tdzp->z_id == zfsvfs->z_root)
return (0);
zp = tdzp;
for (;;) {
ASSERT(!zp->z_unlinked);
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_PARENT(zfsvfs), &parent, sizeof (parent))) != 0)
break;
if (parent == szp->z_id) {
error = SET_ERROR(EINVAL);
break;
}
if (parent == zfsvfs->z_root)
break;
if (parent == sdzp->z_id)
break;
error = zfs_zget(zfsvfs, parent, &zp1);
if (error != 0)
break;
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(
dmu_objset_pool(zfsvfs->z_os)));
zp = zp1;
}
if (error == ENOTDIR)
panic("checkpath: .. not a directory\n");
if (zp != tdzp)
VN_RELE_ASYNC(ZTOV(zp),
dsl_pool_zrele_taskq(dmu_objset_pool(zfsvfs->z_os)));
return (error);
}
#if __FreeBSD_version < 1300124
static void
cache_vop_rename(struct vnode *fdvp, struct vnode *fvp, struct vnode *tdvp,
struct vnode *tvp, struct componentname *fcnp, struct componentname *tcnp)
{
cache_purge(fvp);
if (tvp != NULL)
cache_purge(tvp);
cache_purge_negative(tdvp);
}
#endif
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdvp - Source directory containing the "old entry".
* snm - Old entry name.
* tdvp - Target directory to contain the "new entry".
* tnm - New entry name.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdvp,tdvp - ctime|mtime updated
*/
/*ARGSUSED*/
static int
zfs_rename_(vnode_t *sdvp, vnode_t **svpp, struct componentname *scnp,
vnode_t *tdvp, vnode_t **tvpp, struct componentname *tcnp,
cred_t *cr, int log)
{
zfsvfs_t *zfsvfs;
znode_t *sdzp, *tdzp, *szp, *tzp;
zilog_t *zilog = NULL;
dmu_tx_t *tx;
const char *snm = scnp->cn_nameptr;
const char *tnm = tcnp->cn_nameptr;
int error = 0;
bool want_seqc_end __maybe_unused = false;
/* Reject renames across filesystems. */
if ((*svpp)->v_mount != tdvp->v_mount ||
((*tvpp) != NULL && (*svpp)->v_mount != (*tvpp)->v_mount)) {
error = SET_ERROR(EXDEV);
goto out;
}
if (zfsctl_is_node(tdvp)) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Lock all four vnodes to ensure safety and semantics of renaming.
*/
error = zfs_rename_relock(sdvp, svpp, tdvp, tvpp, scnp, tcnp);
if (error != 0) {
/* no vnodes are locked in the case of error here */
return (error);
}
tdzp = VTOZ(tdvp);
sdzp = VTOZ(sdvp);
zfsvfs = tdzp->z_zfsvfs;
zilog = zfsvfs->z_log;
/*
* After we re-enter ZFS_ENTER() we will have to revalidate all
* znodes involved.
*/
ZFS_ENTER(zfsvfs);
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
error = SET_ERROR(EILSEQ);
goto unlockout;
}
/* If source and target are the same file, there is nothing to do. */
if ((*svpp) == (*tvpp)) {
error = 0;
goto unlockout;
}
if (((*svpp)->v_type == VDIR && (*svpp)->v_mountedhere != NULL) ||
((*tvpp) != NULL && (*tvpp)->v_type == VDIR &&
(*tvpp)->v_mountedhere != NULL)) {
error = SET_ERROR(EXDEV);
goto unlockout;
}
/*
* We cannot use ZFS_VERIFY_ZP() here because it could return directly,
* bypassing the cleanup code, in the case of an error.
*/
if (tdzp->z_sa_hdl == NULL || sdzp->z_sa_hdl == NULL) {
error = SET_ERROR(EIO);
goto unlockout;
}
szp = VTOZ(*svpp);
tzp = *tvpp == NULL ? NULL : VTOZ(*tvpp);
if (szp->z_sa_hdl == NULL || (tzp != NULL && tzp->z_sa_hdl == NULL)) {
error = SET_ERROR(EIO);
goto unlockout;
}
/*
* This is to prevent the creation of links into attribute space
* by renaming a linked file into/out of an attribute directory.
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
error = SET_ERROR(EINVAL);
goto unlockout;
}
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID, but also the ZFS_PROJINHERIT flag. In
* that case, we only allow renames into our tree when the project
* IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto unlockout;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
goto unlockout;
if ((*svpp)->v_type == VDIR) {
/*
* Avoid ".", "..", and aliases of "." for obvious reasons.
*/
if ((scnp->cn_namelen == 1 && scnp->cn_nameptr[0] == '.') ||
sdzp == szp ||
(scnp->cn_flags | tcnp->cn_flags) & ISDOTDOT) {
error = EINVAL;
goto unlockout;
}
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_check(szp, sdzp, tdzp)))
goto unlockout;
}
/*
* Does target exist?
*/
if (tzp) {
/*
* Source and target must be the same type.
*/
if ((*svpp)->v_type == VDIR) {
if ((*tvpp)->v_type != VDIR) {
error = SET_ERROR(ENOTDIR);
goto unlockout;
} else {
cache_purge(tdvp);
if (sdvp != tdvp)
cache_purge(sdvp);
}
} else {
if ((*tvpp)->v_type == VDIR) {
error = SET_ERROR(EISDIR);
goto unlockout;
}
}
}
vn_seqc_write_begin(*svpp);
vn_seqc_write_begin(sdvp);
if (*tvpp != NULL)
vn_seqc_write_begin(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_begin(tdvp);
#if __FreeBSD_version >= 1300102
want_seqc_end = true;
#endif
vnevent_rename_src(*svpp, sdvp, scnp->cn_nameptr, ct);
if (tzp)
vnevent_rename_dest(*tvpp, tdvp, tnm, ct);
/*
* Notify the target directory if it is not the same
* as the source directory.
*/
if (tdvp != sdvp) {
vnevent_rename_dest_dir(tdvp, ct);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto unlockout;
}
if (tzp) /* Attempt to remove the existing target */
error = zfs_link_destroy(tdzp, tnm, tzp, tx, 0, NULL);
if (error == 0) {
error = zfs_link_create(tdzp, tnm, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
error = zfs_link_destroy(sdzp, snm, szp, tx, ZRENAMING,
NULL);
if (error == 0) {
zfs_log_rename(zilog, tx, TX_RENAME, sdzp,
snm, tdzp, tnm, szp);
/*
* Update path information for the target vnode
*/
vn_renamepath(tdvp, *svpp, tnm, strlen(tnm));
} else {
/*
* At this point, we have successfully created
* the target name, but have failed to remove
* the source name. Since the create was done
* with the ZRENAMING flag, there are
* complications; for one, the link count is
* wrong. The easiest way to deal with this
* is to remove the newly created target, and
* return the original error. This must
* succeed; fortunately, it is very unlikely to
* fail, since we just created it.
*/
VERIFY0(zfs_link_destroy(tdzp, tnm, szp, tx,
ZRENAMING, NULL));
}
}
if (error == 0) {
cache_vop_rename(sdvp, *svpp, tdvp, *tvpp, scnp, tcnp);
}
}
dmu_tx_commit(tx);
unlockout: /* all 4 vnodes are locked, ZFS_ENTER called */
ZFS_EXIT(zfsvfs);
if (want_seqc_end) {
vn_seqc_write_end(*svpp);
vn_seqc_write_end(sdvp);
if (*tvpp != NULL)
vn_seqc_write_end(*tvpp);
if (tdvp != *tvpp)
vn_seqc_write_end(tdvp);
want_seqc_end = false;
}
VOP_UNLOCK1(*svpp);
VOP_UNLOCK1(sdvp);
out: /* original two vnodes are locked */
MPASS(!want_seqc_end);
if (error == 0 && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (*tvpp != NULL)
VOP_UNLOCK1(*tvpp);
if (tdvp != *tvpp)
VOP_UNLOCK1(tdvp);
return (error);
}
int
zfs_rename(znode_t *sdzp, const char *sname, znode_t *tdzp, const char *tname,
cred_t *cr, int flags)
{
struct componentname scn, tcn;
vnode_t *sdvp, *tdvp;
vnode_t *svp, *tvp;
int error;
svp = tvp = NULL;
sdvp = ZTOV(sdzp);
tdvp = ZTOV(tdzp);
error = zfs_lookup_internal(sdzp, sname, &svp, &scn, DELETE);
if (sdzp->z_zfsvfs->z_replay == B_FALSE)
VOP_UNLOCK1(sdvp);
if (error != 0)
goto fail;
VOP_UNLOCK1(svp);
vn_lock(tdvp, LK_EXCLUSIVE | LK_RETRY);
error = zfs_lookup_internal(tdzp, tname, &tvp, &tcn, RENAME);
if (error == EJUSTRETURN)
tvp = NULL;
else if (error != 0) {
VOP_UNLOCK1(tdvp);
goto fail;
}
error = zfs_rename_(sdvp, &svp, &scn, tdvp, &tvp, &tcn, cr, 0);
fail:
if (svp != NULL)
vrele(svp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
/*
* Insert the indicated symbolic reference entry into the directory.
*
* IN: dvp - Directory to contain new symbolic link.
* link - Name for new symlink entry.
* vap - Attributes of new entry.
* cr - credentials of caller.
* ct - caller context
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dvp - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_symlink(znode_t *dzp, const char *name, vattr_t *vap,
const char *link, znode_t **zpp, cred_t *cr, int flags)
{
znode_t *zp;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
ASSERT3S(vap->va_type, ==, VLNK);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (len > MAXPATHLEN) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(dzp, name, &zp, ZNEW);
if (error) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids,
0 /* projid */)) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
getnewvnode_reserve_();
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
getnewvnode_drop_reserve();
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create a new object for the symlink.
* For version 4 ZPL datasets the symlink will be an SA attribute.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
__DECONST(void *, link), len, tx);
else
zfs_sa_symlink(zp, __DECONST(char *, link), len, tx);
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
(void) zfs_link_create(dzp, name, zp, tx, ZNEW);
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
*zpp = zp;
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
getnewvnode_drop_reserve();
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by vp.
*
* IN: vp - vnode of symbolic link.
* uio - structure to contain the link path.
* cr - credentials of caller.
* ct - caller context
*
* OUT: uio - structure containing the link path.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* vp - atime updated
*/
/* ARGSUSED */
static int
zfs_readlink(vnode_t *vp, zfs_uio_t *uio, cred_t *cr, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Insert a new entry into directory tdvp referencing svp.
*
* IN: tdvp - Directory to contain new entry.
* svp - vnode of new entry.
* name - name of new entry.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* tdvp - ctime|mtime updated
* svp - ctime updated
*/
/* ARGSUSED */
int
zfs_link(znode_t *tdzp, znode_t *szp, const char *name, cred_t *cr,
int flags)
{
znode_t *tzp;
zfsvfs_t *zfsvfs = tdzp->z_zfsvfs;
zilog_t *zilog;
dmu_tx_t *tx;
int error;
uint64_t parent;
uid_t owner;
ASSERT3S(ZTOV(tdzp)->v_type, ==, VDIR);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(tdzp);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (ZTOV(szp)->v_type == VDIR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
ZFS_VERIFY_ZP(szp);
/*
* If we are using project inheritance, meaning the directory has
* ZFS_PROJINHERIT set, then its descendant directories will inherit
* not only the project ID, but also the ZFS_PROJINHERIT flag. In
* that case, we only allow hard link creation in our tree when the
* project IDs are the same.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
if (szp->z_pflags & (ZFS_APPENDONLY |
ZFS_IMMUTABLE | ZFS_READONLY)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (tdzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, szp->z_uid, cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(ZTOV(szp), cr) != 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(tdzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lookup(tdzp, name, &tzp, ZNEW);
if (error) {
ZFS_EXIT(zfsvfs);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, name);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, tdzp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
error = zfs_link_create(tdzp, name, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
zfs_log_link(zilog, tx, txtype, tdzp, szp, name);
}
dmu_tx_commit(tx);
if (error == 0) {
vnevent_link(ZTOV(szp), ct);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
* IN: ip - inode of file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* ip - ctime|mtime updated
*/
/* ARGSUSED */
int
zfs_space(znode_t *zp, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t off, len;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (cmd != F_FREESP) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
if (bfp->l_len < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
static void
zfs_inactive(vnode_t *vp, cred_t *cr, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int error;
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL) {
/*
* The fs has been unmounted, or we did a
* suspend/resume and this file no longer exists.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
if (zp->z_unlinked) {
/*
* Fast path to recycle a vnode of a removed file.
*/
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vrecycle(vp);
return;
}
if (zp->z_atime_dirty && zp->z_unlinked == 0) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&zp->z_atime, sizeof (zp->z_atime), tx);
zp->z_atime_dirty = 0;
dmu_tx_commit(tx);
}
}
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
}
CTASSERT(sizeof (struct zfid_short) <= sizeof (struct fid));
CTASSERT(sizeof (struct zfid_long) <= sizeof (struct fid));
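/*
* Build an NFS-style file handle for the vnode: the object number and
* generation are packed byte-by-byte into a short fid, and a long fid
* additionally encodes the objset id when this filesystem is not its
* own parent (e.g. snapshots automounted under .zfs).
*/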
/*ARGSUSED*/
static int
zfs_fid(vnode_t *vp, fid_t *fidp, caller_context_t *ct)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
gen = (uint32_t)gen64;
size = (zfsvfs->z_parent != zfsvfs) ? LONG_FID_LEN : SHORT_FID_LEN;
fidp->fid_len = size;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
if (size == LONG_FID_LEN) {
uint64_t objsetid = dmu_objset_id(zfsvfs->z_os);
zfid_long_t *zlfid;
zlfid = (zfid_long_t *)fidp;
for (i = 0; i < sizeof (zlfid->zf_setid); i++)
zlfid->zf_setid[i] = (uint8_t)(objsetid >> (8 * i));
/* XXX - this should be the generation number for the objset */
for (i = 0; i < sizeof (zlfid->zf_setgen); i++)
zlfid->zf_setgen[i] = 0;
}
ZFS_EXIT(zfsvfs);
return (0);
}
static int
zfs_pathconf(vnode_t *vp, int cmd, ulong_t *valp, cred_t *cr,
caller_context_t *ct)
{
znode_t *zp;
zfsvfs_t *zfsvfs;
switch (cmd) {
case _PC_LINK_MAX:
*valp = MIN(LONG_MAX, ZFS_LINK_MAX);
return (0);
case _PC_FILESIZEBITS:
*valp = 64;
return (0);
case _PC_MIN_HOLE_SIZE:
*valp = (int)SPA_MINBLOCKSIZE;
return (0);
case _PC_ACL_EXTENDED:
#if 0 /* POSIX ACLs are not implemented for ZFS on FreeBSD yet. */
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
*valp = zfsvfs->z_acl_type == ZFSACLTYPE_POSIX ? 1 : 0;
ZFS_EXIT(zfsvfs);
#else
*valp = 0;
#endif
return (0);
case _PC_ACL_NFS4:
zp = VTOZ(vp);
zfsvfs = zp->z_zfsvfs;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
*valp = zfsvfs->z_acl_type == ZFS_ACLTYPE_NFSV4 ? 1 : 0;
ZFS_EXIT(zfsvfs);
return (0);
case _PC_ACL_PATH_MAX:
*valp = ACL_MAX_ENTRIES;
return (0);
default:
return (EOPNOTSUPP);
}
}
static int
zfs_getpages(struct vnode *vp, vm_page_t *ma, int count, int *rbehind,
int *rahead)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
objset_t *os = zp->z_zfsvfs->z_os;
zfs_locked_range_t *lr;
vm_object_t object;
off_t start, end, obj_size;
uint_t blksz;
int pgsin_b, pgsin_a;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
start = IDX_TO_OFF(ma[0]->pindex);
end = IDX_TO_OFF(ma[count - 1]->pindex + 1);
/*
* Lock a range covering all required and optional pages.
* Note that we need to handle the case of the block size growing.
*/
for (;;) {
blksz = zp->z_blksz;
lr = zfs_rangelock_tryenter(&zp->z_rangelock,
rounddown(start, blksz),
roundup(end, blksz) - rounddown(start, blksz), RL_READER);
if (lr == NULL) {
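/*
* The range could not be locked without blocking.  Proceed
* without readahead/readbehind and read only the pages that
* were actually requested.
*/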
if (rahead != NULL) {
*rahead = 0;
rahead = NULL;
}
if (rbehind != NULL) {
*rbehind = 0;
rbehind = NULL;
}
break;
}
if (blksz == zp->z_blksz)
break;
zfs_rangelock_exit(lr);
}
object = ma[0]->object;
zfs_vmobject_wlock(object);
obj_size = object->un_pager.vnp.vnp_size;
zfs_vmobject_wunlock(object);
if (IDX_TO_OFF(ma[count - 1]->pindex) >= obj_size) {
if (lr != NULL)
zfs_rangelock_exit(lr);
ZFS_EXIT(zfsvfs);
return (zfs_vm_pagerret_bad);
}
pgsin_b = 0;
if (rbehind != NULL) {
pgsin_b = OFF_TO_IDX(start - rounddown(start, blksz));
pgsin_b = MIN(*rbehind, pgsin_b);
}
pgsin_a = 0;
if (rahead != NULL) {
pgsin_a = OFF_TO_IDX(roundup(end, blksz) - end);
if (end + IDX_TO_OFF(pgsin_a) >= obj_size)
pgsin_a = OFF_TO_IDX(round_page(obj_size) - end);
pgsin_a = MIN(*rahead, pgsin_a);
}
/*
* NB: we need to pass the exact byte size of the data that we expect
* to read after accounting for the file size. This is required because
* ZFS will panic if we request DMU to read beyond the end of the last
* allocated block.
*/
error = dmu_read_pages(os, zp->z_id, ma, count, &pgsin_b, &pgsin_a,
MIN(end, obj_size) - (end - PAGE_SIZE));
if (lr != NULL)
zfs_rangelock_exit(lr);
ZFS_ACCESSTIME_STAMP(zfsvfs, zp);
ZFS_EXIT(zfsvfs);
if (error != 0)
return (zfs_vm_pagerret_error);
VM_CNT_INC(v_vnodein);
VM_CNT_ADD(v_vnodepgsin, count + pgsin_b + pgsin_a);
if (rbehind != NULL)
*rbehind = pgsin_b;
if (rahead != NULL)
*rahead = pgsin_a;
return (zfs_vm_pagerret_ok);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int *a_rbehind;
int *a_rahead;
};
#endif
static int
zfs_freebsd_getpages(struct vop_getpages_args *ap)
{
return (zfs_getpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_rbehind,
ap->a_rahead));
}
static int
zfs_putpages(struct vnode *vp, vm_page_t *ma, size_t len, int flags,
int *rtvals)
{
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zfs_locked_range_t *lr;
dmu_tx_t *tx;
struct sf_buf *sf;
vm_object_t object;
vm_page_t m;
caddr_t va;
size_t tocopy;
size_t lo_len;
vm_ooffset_t lo_off;
vm_ooffset_t off;
uint_t blksz;
int ncount;
int pcount;
int err;
int i;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
object = vp->v_object;
pcount = btoc(len);
ncount = pcount;
KASSERT(ma[0]->object == object, ("mismatching object"));
KASSERT(len > 0 && (len & PAGE_MASK) == 0, ("unexpected length"));
for (i = 0; i < pcount; i++)
rtvals[i] = zfs_vm_pagerret_error;
off = IDX_TO_OFF(ma[0]->pindex);
blksz = zp->z_blksz;
lo_off = rounddown(off, blksz);
lo_len = roundup(len + (off - lo_off), blksz);
lr = zfs_rangelock_enter(&zp->z_rangelock, lo_off, lo_len, RL_WRITER);
zfs_vmobject_wlock(object);
if (len + off > object->un_pager.vnp.vnp_size) {
if (object->un_pager.vnp.vnp_size > off) {
int pgoff;
len = object->un_pager.vnp.vnp_size - off;
ncount = btoc(len);
if ((pgoff = (int)len & PAGE_MASK) != 0) {
/*
* If the object is locked and the following
* conditions hold, then the page's dirty
* field cannot be concurrently changed by a
* pmap operation.
*/
m = ma[ncount - 1];
vm_page_assert_sbusied(m);
KASSERT(!pmap_page_is_write_mapped(m),
("zfs_putpages: page %p is not read-only",
m));
vm_page_clear_dirty(m, pgoff, PAGE_SIZE -
pgoff);
}
} else {
len = 0;
ncount = 0;
}
if (ncount < pcount) {
for (i = ncount; i < pcount; i++) {
rtvals[i] = zfs_vm_pagerret_bad;
}
}
}
zfs_vmobject_wunlock(object);
if (ncount == 0)
goto out;
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT, zp->z_uid) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT, zp->z_gid) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
goto out;
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, off, len);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err != 0) {
dmu_tx_abort(tx);
goto out;
}
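/*
* When the file's block size is smaller than a page, copy each page out
* through a temporary sf_buf mapping with dmu_write(); otherwise hand
* the whole page run to dmu_write_pages() in one call.
*/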
if (zp->z_blksz < PAGE_SIZE) {
for (i = 0; len > 0; off += tocopy, len -= tocopy, i++) {
tocopy = len > PAGE_SIZE ? PAGE_SIZE : len;
va = zfs_map_page(ma[i], &sf);
dmu_write(zfsvfs->z_os, zp->z_id, off, tocopy, va, tx);
zfs_unmap_page(sf);
}
} else {
err = dmu_write_pages(zfsvfs->z_os, zp->z_id, off, len, ma, tx);
}
if (err == 0) {
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT0(err);
/*
* XXX we should be passing a callback to undirty
* but that would make the locking messier
*/
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, off,
len, 0, NULL, NULL);
zfs_vmobject_wlock(object);
for (i = 0; i < ncount; i++) {
rtvals[i] = zfs_vm_pagerret_ok;
vm_page_undirty(ma[i]);
}
zfs_vmobject_wunlock(object);
VM_CNT_INC(v_vnodeout);
VM_CNT_ADD(v_vnodepgsout, ncount);
}
dmu_tx_commit(tx);
out:
zfs_rangelock_exit(lr);
if ((flags & (zfs_vm_pagerput_sync | zfs_vm_pagerput_inval)) != 0 ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zfsvfs->z_log, zp->z_id);
ZFS_EXIT(zfsvfs);
return (rtvals[0]);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_putpages_args {
struct vnode *a_vp;
vm_page_t *a_m;
int a_count;
int a_sync;
int *a_rtvals;
};
#endif
static int
zfs_freebsd_putpages(struct vop_putpages_args *ap)
{
return (zfs_putpages(ap->a_vp, ap->a_m, ap->a_count, ap->a_sync,
ap->a_rtvals));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_bmap_args {
struct vnode *a_vp;
daddr_t a_bn;
struct bufobj **a_bop;
daddr_t *a_bnp;
int *a_runp;
int *a_runb;
};
#endif
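/*
* Report a trivial identity mapping: the logical block number maps to
* itself on the vnode's own bufobj, with no read-ahead or read-behind
* runs.
*/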
static int
zfs_freebsd_bmap(struct vop_bmap_args *ap)
{
if (ap->a_bop != NULL)
*ap->a_bop = &ap->a_vp->v_bufobj;
if (ap->a_bnp != NULL)
*ap->a_bnp = ap->a_bn;
if (ap->a_runp != NULL)
*ap->a_runp = 0;
if (ap->a_runb != NULL)
*ap->a_runb = 0;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_open_args {
struct vnode *a_vp;
int a_mode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_open(struct vop_open_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
int error;
error = zfs_open(&vp, ap->a_mode, ap->a_cred);
if (error == 0)
vnode_create_vobject(vp, zp->z_size, ap->a_td);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_close_args {
struct vnode *a_vp;
int a_fflag;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_close(struct vop_close_args *ap)
{
return (zfs_close(ap->a_vp, ap->a_fflag, 1, 0, ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_ioctl_args {
struct vnode *a_vp;
ulong_t a_command;
caddr_t a_data;
int a_fflag;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_ioctl(struct vop_ioctl_args *ap)
{
return (zfs_ioctl(ap->a_vp, ap->a_command, (intptr_t)ap->a_data,
ap->a_fflag, ap->a_cred, NULL));
}
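/*
* Translate FreeBSD vnode I/O flags (IO_*) into the fcntl-style flags
* (FAPPEND, FNONBLOCK, FSYNC, ...) expected by zfs_read() and
* zfs_write().
*/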
static int
ioflags(int ioflags)
{
int flags = 0;
if (ioflags & IO_APPEND)
flags |= FAPPEND;
if (ioflags & IO_NDELAY)
flags |= FNONBLOCK;
if (ioflags & IO_SYNC)
flags |= (FSYNC | FDSYNC | FRSYNC);
return (flags);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_read_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_read(struct vop_read_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_read(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_write_args {
struct vnode *a_vp;
struct uio *a_uio;
int a_ioflag;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_write(struct vop_write_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_write(VTOZ(ap->a_vp), &uio, ioflags(ap->a_ioflag),
ap->a_cred));
}
#if __FreeBSD_version >= 1300102
/*
* VOP_FPLOOKUP_VEXEC routines are subject to special circumstances, see
* the comment above cache_fplookup for details.
*/
static int
zfs_freebsd_fplookup_vexec(struct vop_fplookup_vexec_args *v)
{
vnode_t *vp;
znode_t *zp;
uint64_t pflags;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL))
return (EAGAIN);
pflags = atomic_load_64(&zp->z_pflags);
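/*
* Fall back to the locked lookup path if the file is quarantined,
* lives in extended attribute space, or execute permission cannot
* be granted without a full access check.
*/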
if (pflags & ZFS_AV_QUARANTINED)
return (EAGAIN);
if (pflags & ZFS_XATTR)
return (EAGAIN);
if ((pflags & ZFS_NO_EXECS_DENIED) == 0)
return (EAGAIN);
return (0);
}
#endif
#if __FreeBSD_version >= 1300139
static int
zfs_freebsd_fplookup_symlink(struct vop_fplookup_symlink_args *v)
{
vnode_t *vp;
znode_t *zp;
char *target;
vp = v->a_vp;
zp = VTOZ_SMR(vp);
if (__predict_false(zp == NULL)) {
return (EAGAIN);
}
target = atomic_load_consume_ptr(&zp->z_cached_symlink);
if (target == NULL) {
return (EAGAIN);
}
return (cache_symlink_resolve(v->a_fpl, target, strlen(target)));
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_access_args {
struct vnode *a_vp;
accmode_t a_accmode;
struct ucred *a_cred;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_access(struct vop_access_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
accmode_t accmode;
int error = 0;
if (ap->a_accmode == VEXEC) {
if (zfs_fastaccesschk_execute(zp, ap->a_cred) == 0)
return (0);
}
/*
* ZFS itself only knows about VREAD, VWRITE, VEXEC and VAPPEND.
*/
accmode = ap->a_accmode & (VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0)
error = zfs_access(zp, accmode, 0, ap->a_cred);
/*
* VADMIN has to be handled by vaccess().
*/
if (error == 0) {
accmode = ap->a_accmode & ~(VREAD|VWRITE|VEXEC|VAPPEND);
if (accmode != 0) {
#if __FreeBSD_version >= 1300105
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred);
#else
error = vaccess(vp->v_type, zp->z_mode, zp->z_uid,
zp->z_gid, accmode, ap->a_cred, NULL);
#endif
}
}
/*
* For VEXEC, ensure that at least one execute bit is set for
* non-directories.
*/
if (error == 0 && (ap->a_accmode & VEXEC) != 0 && vp->v_type != VDIR &&
(zp->z_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
error = EACCES;
}
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_lookup(struct vop_lookup_args *ap, boolean_t cached)
{
struct componentname *cnp = ap->a_cnp;
char nm[NAME_MAX + 1];
ASSERT3U(cnp->cn_namelen, <, sizeof (nm));
strlcpy(nm, cnp->cn_nameptr, MIN(cnp->cn_namelen + 1, sizeof (nm)));
return (zfs_lookup(ap->a_dvp, nm, ap->a_vpp, cnp, cnp->cn_nameiop,
cnp->cn_cred, cnp->cn_thread, 0, cached));
}
static int
zfs_freebsd_cachedlookup(struct vop_cachedlookup_args *ap)
{
return (zfs_freebsd_lookup((struct vop_lookup_args *)ap, B_TRUE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_lookup_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
};
#endif
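/*
* Dispatch lookups through the name cache when it is enabled for this
* filesystem; otherwise fall through to the uncached ZFS lookup.
*/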
static int
zfs_cache_lookup(struct vop_lookup_args *ap)
{
zfsvfs_t *zfsvfs;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
if (zfsvfs->z_use_namecache)
return (vfs_cache_lookup(ap));
else
return (zfs_freebsd_lookup(ap, B_FALSE));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_create_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_create(struct vop_create_args *ap)
{
zfsvfs_t *zfsvfs;
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc, mode;
ASSERT(cnp->cn_flags & SAVENAME);
vattr_init_mask(vap);
mode = vap->va_mode & ALLPERMS;
zfsvfs = ap->a_dvp->v_mount->mnt_data;
*ap->a_vpp = NULL;
rc = zfs_create(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap, !EXCL, mode,
&zp, cnp->cn_cred, 0 /* flag */, NULL /* vsecattr */);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
if (zfsvfs->z_use_namecache &&
rc == 0 && (cnp->cn_flags & MAKEENTRY) != 0)
cache_enter(ap->a_dvp, *ap->a_vpp, cnp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_remove_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_remove(struct vop_remove_args *ap)
{
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
return (zfs_remove_(ap->a_dvp, ap->a_vp, ap->a_cnp->cn_nameptr,
ap->a_cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_mkdir_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
};
#endif
static int
zfs_freebsd_mkdir(struct vop_mkdir_args *ap)
{
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
int rc;
ASSERT(ap->a_cnp->cn_flags & SAVENAME);
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_mkdir(VTOZ(ap->a_dvp), ap->a_cnp->cn_nameptr, vap, &zp,
ap->a_cnp->cn_cred, 0, NULL);
if (rc == 0)
*ap->a_vpp = ZTOV(zp);
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_rmdir_args {
struct vnode *a_dvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_rmdir(struct vop_rmdir_args *ap)
{
struct componentname *cnp = ap->a_cnp;
ASSERT(cnp->cn_flags & SAVENAME);
return (zfs_rmdir_(ap->a_dvp, ap->a_vp, cnp->cn_nameptr, cnp->cn_cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readdir_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
int *a_eofflag;
int *a_ncookies;
ulong_t **a_cookies;
};
#endif
static int
zfs_freebsd_readdir(struct vop_readdir_args *ap)
{
zfs_uio_t uio;
zfs_uio_init(&uio, ap->a_uio);
return (zfs_readdir(ap->a_vp, &uio, ap->a_cred, ap->a_eofflag,
ap->a_ncookies, ap->a_cookies));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fsync_args {
struct vnode *a_vp;
int a_waitfor;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_fsync(struct vop_fsync_args *ap)
{
vop_stdfsync(ap);
return (zfs_fsync(VTOZ(ap->a_vp), 0, ap->a_td->td_ucred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_getattr(struct vop_getattr_args *ap)
{
vattr_t *vap = ap->a_vap;
xvattr_t xvap;
ulong_t fflags = 0;
int error;
xva_init(&xvap);
xvap.xva_vattr = *vap;
xvap.xva_vattr.va_mask |= AT_XVATTR;
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
XVA_SET_REQ(&xvap, XAT_IMMUTABLE);
XVA_SET_REQ(&xvap, XAT_APPENDONLY);
XVA_SET_REQ(&xvap, XAT_NOUNLINK);
XVA_SET_REQ(&xvap, XAT_NODUMP);
XVA_SET_REQ(&xvap, XAT_READONLY);
XVA_SET_REQ(&xvap, XAT_ARCHIVE);
XVA_SET_REQ(&xvap, XAT_SYSTEM);
XVA_SET_REQ(&xvap, XAT_HIDDEN);
XVA_SET_REQ(&xvap, XAT_REPARSE);
XVA_SET_REQ(&xvap, XAT_OFFLINE);
XVA_SET_REQ(&xvap, XAT_SPARSE);
error = zfs_getattr(ap->a_vp, (vattr_t *)&xvap, 0, ap->a_cred);
if (error != 0)
return (error);
/* Convert ZFS xattr into chflags. */
#define FLAG_CHECK(fflag, xflag, xfield) do { \
if (XVA_ISSET_RTN(&xvap, (xflag)) && (xfield) != 0) \
fflags |= (fflag); \
} while (0)
FLAG_CHECK(SF_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHECK(SF_APPEND, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHECK(SF_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHECK(UF_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHECK(UF_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHECK(UF_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHECK(UF_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHECK(UF_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHECK(UF_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHECK(UF_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHECK(UF_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHECK
*vap = xvap.xva_vattr;
vap->va_flags = fflags;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setattr_args {
struct vnode *a_vp;
struct vattr *a_vap;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_setattr(struct vop_setattr_args *ap)
{
vnode_t *vp = ap->a_vp;
vattr_t *vap = ap->a_vap;
cred_t *cred = ap->a_cred;
xvattr_t xvap;
ulong_t fflags;
uint64_t zflags;
vattr_init_mask(vap);
vap->va_mask &= ~AT_NOSET;
xva_init(&xvap);
xvap.xva_vattr = *vap;
zflags = VTOZ(vp)->z_pflags;
if (vap->va_flags != VNOVAL) {
zfsvfs_t *zfsvfs = VTOZ(vp)->z_zfsvfs;
int error;
if (zfsvfs->z_use_fuids == B_FALSE)
return (EOPNOTSUPP);
fflags = vap->va_flags;
/*
* XXX KDM
* We need to figure out whether it makes sense to allow
* UF_REPARSE through, since we don't really have other
* facilities to handle reparse points and zfs_setattr()
* doesn't currently allow setting that attribute anyway.
*/
if ((fflags & ~(SF_IMMUTABLE|SF_APPEND|SF_NOUNLINK|UF_ARCHIVE|
UF_NODUMP|UF_SYSTEM|UF_HIDDEN|UF_READONLY|UF_REPARSE|
UF_OFFLINE|UF_SPARSE)) != 0)
return (EOPNOTSUPP);
/*
* Unprivileged processes are not permitted to unset system
* flags, or modify flags if any system flags are set.
* Privileged non-jail processes may not modify system flags
* if securelevel > 0 and any existing system flags are set.
* Privileged jail processes behave like privileged non-jail
* processes if the PR_ALLOW_CHFLAGS permission bit is set;
* otherwise, they behave like unprivileged processes.
*/
if (secpolicy_fs_owner(vp->v_mount, cred) == 0 ||
spl_priv_check_cred(cred, PRIV_VFS_SYSFLAGS) == 0) {
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY | ZFS_NOUNLINK)) {
error = securelevel_gt(cred, 0);
if (error != 0)
return (error);
}
} else {
/*
* Callers may only modify the file flags on
* objects they have VADMIN rights for.
*/
if ((error = VOP_ACCESS(vp, VADMIN, cred,
curthread)) != 0)
return (error);
if (zflags &
(ZFS_IMMUTABLE | ZFS_APPENDONLY |
ZFS_NOUNLINK)) {
return (EPERM);
}
if (fflags &
(SF_IMMUTABLE | SF_APPEND | SF_NOUNLINK)) {
return (EPERM);
}
}
#define FLAG_CHANGE(fflag, zflag, xflag, xfield) do { \
if (((fflags & (fflag)) && !(zflags & (zflag))) || \
((zflags & (zflag)) && !(fflags & (fflag)))) { \
XVA_SET_REQ(&xvap, (xflag)); \
(xfield) = ((fflags & (fflag)) != 0); \
} \
} while (0)
/* Convert chflags into ZFS-type flags. */
/* XXX: what about SF_SETTABLE? */
FLAG_CHANGE(SF_IMMUTABLE, ZFS_IMMUTABLE, XAT_IMMUTABLE,
xvap.xva_xoptattrs.xoa_immutable);
FLAG_CHANGE(SF_APPEND, ZFS_APPENDONLY, XAT_APPENDONLY,
xvap.xva_xoptattrs.xoa_appendonly);
FLAG_CHANGE(SF_NOUNLINK, ZFS_NOUNLINK, XAT_NOUNLINK,
xvap.xva_xoptattrs.xoa_nounlink);
FLAG_CHANGE(UF_ARCHIVE, ZFS_ARCHIVE, XAT_ARCHIVE,
xvap.xva_xoptattrs.xoa_archive);
FLAG_CHANGE(UF_NODUMP, ZFS_NODUMP, XAT_NODUMP,
xvap.xva_xoptattrs.xoa_nodump);
FLAG_CHANGE(UF_READONLY, ZFS_READONLY, XAT_READONLY,
xvap.xva_xoptattrs.xoa_readonly);
FLAG_CHANGE(UF_SYSTEM, ZFS_SYSTEM, XAT_SYSTEM,
xvap.xva_xoptattrs.xoa_system);
FLAG_CHANGE(UF_HIDDEN, ZFS_HIDDEN, XAT_HIDDEN,
xvap.xva_xoptattrs.xoa_hidden);
FLAG_CHANGE(UF_REPARSE, ZFS_REPARSE, XAT_REPARSE,
xvap.xva_xoptattrs.xoa_reparse);
FLAG_CHANGE(UF_OFFLINE, ZFS_OFFLINE, XAT_OFFLINE,
xvap.xva_xoptattrs.xoa_offline);
FLAG_CHANGE(UF_SPARSE, ZFS_SPARSE, XAT_SPARSE,
xvap.xva_xoptattrs.xoa_sparse);
#undef FLAG_CHANGE
}
if (vap->va_birthtime.tv_sec != VNOVAL) {
xvap.xva_vattr.va_mask |= AT_XVATTR;
XVA_SET_REQ(&xvap, XAT_CREATETIME);
}
return (zfs_setattr(VTOZ(vp), (vattr_t *)&xvap, 0, cred));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_rename_args {
struct vnode *a_fdvp;
struct vnode *a_fvp;
struct componentname *a_fcnp;
struct vnode *a_tdvp;
struct vnode *a_tvp;
struct componentname *a_tcnp;
};
#endif
static int
zfs_freebsd_rename(struct vop_rename_args *ap)
{
vnode_t *fdvp = ap->a_fdvp;
vnode_t *fvp = ap->a_fvp;
vnode_t *tdvp = ap->a_tdvp;
vnode_t *tvp = ap->a_tvp;
int error;
ASSERT(ap->a_fcnp->cn_flags & (SAVENAME|SAVESTART));
ASSERT(ap->a_tcnp->cn_flags & (SAVENAME|SAVESTART));
error = zfs_rename_(fdvp, &fvp, ap->a_fcnp, tdvp, &tvp,
ap->a_tcnp, ap->a_fcnp->cn_cred, 1);
vrele(fdvp);
vrele(fvp);
vrele(tdvp);
if (tvp != NULL)
vrele(tvp);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_symlink_args {
struct vnode *a_dvp;
struct vnode **a_vpp;
struct componentname *a_cnp;
struct vattr *a_vap;
char *a_target;
};
#endif
static int
zfs_freebsd_symlink(struct vop_symlink_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vattr_t *vap = ap->a_vap;
znode_t *zp = NULL;
#if __FreeBSD_version >= 1300139
char *symlink;
size_t symlink_len;
#endif
int rc;
ASSERT(cnp->cn_flags & SAVENAME);
vap->va_type = VLNK; /* FreeBSD: Syscall only sets va_mode. */
vattr_init_mask(vap);
*ap->a_vpp = NULL;
rc = zfs_symlink(VTOZ(ap->a_dvp), cnp->cn_nameptr, vap,
ap->a_target, &zp, cnp->cn_cred, 0 /* flags */);
if (rc == 0) {
*ap->a_vpp = ZTOV(zp);
ASSERT_VOP_ELOCKED(ZTOV(zp), __func__);
#if __FreeBSD_version >= 1300139
MPASS(zp->z_cached_symlink == NULL);
symlink_len = strlen(ap->a_target);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, ap->a_target, symlink_len);
symlink[symlink_len] = '\0';
atomic_store_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)symlink);
}
#endif
}
return (rc);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_readlink_args {
struct vnode *a_vp;
struct uio *a_uio;
struct ucred *a_cred;
};
#endif
static int
zfs_freebsd_readlink(struct vop_readlink_args *ap)
{
zfs_uio_t uio;
int error;
#if __FreeBSD_version >= 1300139
znode_t *zp = VTOZ(ap->a_vp);
char *symlink, *base;
size_t symlink_len;
bool trycache;
#endif
zfs_uio_init(&uio, ap->a_uio);
#if __FreeBSD_version >= 1300139
trycache = false;
if (zfs_uio_segflg(&uio) == UIO_SYSSPACE &&
zfs_uio_iovcnt(&uio) == 1) {
base = zfs_uio_iovbase(&uio, 0);
symlink_len = zfs_uio_iovlen(&uio, 0);
trycache = true;
}
#endif
error = zfs_readlink(ap->a_vp, &uio, ap->a_cred, NULL);
#if __FreeBSD_version >= 1300139
if (atomic_load_ptr(&zp->z_cached_symlink) != NULL ||
error != 0 || !trycache) {
return (error);
}
symlink_len -= zfs_uio_resid(&uio);
symlink = cache_symlink_alloc(symlink_len + 1, M_WAITOK);
if (symlink != NULL) {
memcpy(symlink, base, symlink_len);
symlink[symlink_len] = '\0';
if (!atomic_cmpset_rel_ptr((uintptr_t *)&zp->z_cached_symlink,
(uintptr_t)NULL, (uintptr_t)symlink)) {
cache_symlink_free(symlink, symlink_len + 1);
}
}
#endif
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_link_args {
struct vnode *a_tdvp;
struct vnode *a_vp;
struct componentname *a_cnp;
};
#endif
static int
zfs_freebsd_link(struct vop_link_args *ap)
{
struct componentname *cnp = ap->a_cnp;
vnode_t *vp = ap->a_vp;
vnode_t *tdvp = ap->a_tdvp;
if (tdvp->v_mount != vp->v_mount)
return (EXDEV);
ASSERT(cnp->cn_flags & SAVENAME);
return (zfs_link(VTOZ(tdvp), VTOZ(vp),
cnp->cn_nameptr, cnp->cn_cred, 0));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_inactive(struct vop_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
#if __FreeBSD_version >= 1300123
zfs_inactive(vp, curthread->td_ucred, NULL);
#else
zfs_inactive(vp, ap->a_td->td_ucred, NULL);
#endif
return (0);
}
#if __FreeBSD_version >= 1300042
#ifndef _SYS_SYSPROTO_H_
struct vop_need_inactive_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
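/*
* Report whether VOP_INACTIVE needs to run: dirty pages must be
* flushed, the teardown lock is unavailable, or the znode was
* unlinked, lost its SA handle, or has a dirty atime to sync.
*/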
static int
zfs_freebsd_need_inactive(struct vop_need_inactive_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
int need;
if (vn_need_pageq_flush(vp))
return (1);
if (!ZFS_TEARDOWN_INACTIVE_TRY_ENTER_READ(zfsvfs))
return (1);
need = (zp->z_sa_hdl == NULL || zp->z_unlinked || zp->z_atime_dirty);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
return (need);
}
#endif
#ifndef _SYS_SYSPROTO_H_
struct vop_reclaim_args {
struct vnode *a_vp;
struct thread *a_td;
};
#endif
static int
zfs_freebsd_reclaim(struct vop_reclaim_args *ap)
{
vnode_t *vp = ap->a_vp;
znode_t *zp = VTOZ(vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
ASSERT3P(zp, !=, NULL);
#if __FreeBSD_version < 1300042
/* Destroy the vm object and flush associated pages. */
vnode_destroy_vobject(vp);
#endif
/*
* z_teardown_inactive_lock protects from a race with
* zfs_znode_dmu_fini in zfsvfs_teardown during
* force unmount.
*/
ZFS_TEARDOWN_INACTIVE_ENTER_READ(zfsvfs);
if (zp->z_sa_hdl == NULL)
zfs_znode_free(zp);
else
zfs_zinactive(zp);
ZFS_TEARDOWN_INACTIVE_EXIT_READ(zfsvfs);
vp->v_data = NULL;
return (0);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_fid_args {
struct vnode *a_vp;
struct fid *a_fid;
};
#endif
static int
zfs_freebsd_fid(struct vop_fid_args *ap)
{
return (zfs_fid(ap->a_vp, (void *)ap->a_fid, NULL));
}
#ifndef _SYS_SYSPROTO_H_
struct vop_pathconf_args {
struct vnode *a_vp;
int a_name;
register_t *a_retval;
} *ap;
#endif
static int
zfs_freebsd_pathconf(struct vop_pathconf_args *ap)
{
ulong_t val;
int error;
error = zfs_pathconf(ap->a_vp, ap->a_name, &val,
curthread->td_ucred, NULL);
if (error == 0) {
*ap->a_retval = val;
return (error);
}
if (error != EOPNOTSUPP)
return (error);
switch (ap->a_name) {
case _PC_NAME_MAX:
*ap->a_retval = NAME_MAX;
return (0);
#if __FreeBSD_version >= 1400032
case _PC_DEALLOC_PRESENT:
*ap->a_retval = 1;
return (0);
#endif
case _PC_PIPE_BUF:
if (ap->a_vp->v_type == VDIR || ap->a_vp->v_type == VFIFO) {
*ap->a_retval = PIPE_BUF;
return (0);
}
return (EINVAL);
default:
return (vop_stdpathconf(ap));
}
}
/*
* FreeBSD's extended attributes namespace defines file name prefix for ZFS'
* extended attribute name:
*
* NAMESPACE PREFIX
* system freebsd:system:
* user (none, can be used to access ZFS fsattr(5) attributes
* created on Solaris)
*/
static int
zfs_create_attrname(int attrnamespace, const char *name, char *attrname,
size_t size)
{
const char *namespace, *prefix, *suffix;
/* We don't allow '/' character in attribute name. */
if (strchr(name, '/') != NULL)
return (SET_ERROR(EINVAL));
/* We don't allow attribute names that start with "freebsd:" string. */
if (strncmp(name, "freebsd:", 8) == 0)
return (SET_ERROR(EINVAL));
bzero(attrname, size);
switch (attrnamespace) {
case EXTATTR_NAMESPACE_USER:
#if 0
prefix = "freebsd:";
namespace = EXTATTR_NAMESPACE_USER_STRING;
suffix = ":";
#else
/*
* This is the default namespace by which we can access all
* attributes created on Solaris.
*/
prefix = namespace = suffix = "";
#endif
break;
case EXTATTR_NAMESPACE_SYSTEM:
prefix = "freebsd:";
namespace = EXTATTR_NAMESPACE_SYSTEM_STRING;
suffix = ":";
break;
case EXTATTR_NAMESPACE_EMPTY:
default:
return (SET_ERROR(EINVAL));
}
if (snprintf(attrname, size, "%s%s%s%s", prefix, namespace, suffix,
name) >= size) {
return (SET_ERROR(ENAMETOOLONG));
}
return (0);
}
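/*
* Make sure the SA-backed xattr nvlist for the znode is loaded.  The
* caller holds z_xattr_lock at least as reader; if an upgrade to writer
* is needed to populate the cache, the lock may be dropped and
* re-acquired, and is downgraded back to reader before returning.
*/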
static int
zfs_ensure_xattr_cached(znode_t *zp)
{
int error = 0;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
if (zp->z_xattr_cached != NULL)
return (0);
if (rw_write_held(&zp->z_xattr_lock))
return (zfs_sa_get_xattr(zp));
if (!rw_tryupgrade(&zp->z_xattr_lock)) {
rw_exit(&zp->z_xattr_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
}
if (zp->z_xattr_cached == NULL)
error = zfs_sa_get_xattr(zp);
rw_downgrade(&zp->z_xattr_lock);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_getextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_getextattr_dir(struct vop_getextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
flags = FREAD;
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname,
xvp, td);
error = vn_open_cred(&nd, &flags, 0, VN_OPEN_INVFS, ap->a_cred, NULL);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error != 0)
return (SET_ERROR(error));
if (ap->a_size != NULL) {
error = VOP_GETATTR(vp, &va, ap->a_cred);
if (error == 0)
*ap->a_size = (size_t)va.va_size;
} else if (ap->a_uio != NULL)
error = VOP_READ(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
static int
zfs_getextattr_sa(struct vop_getextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
uchar_t *nv_value;
uint_t nv_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
error = nvlist_lookup_byte_array(zp->z_xattr_cached, attrname,
&nv_value, &nv_size);
if (error != 0)
return (SET_ERROR(error));
if (ap->a_size != NULL)
*ap->a_size = nv_size;
else if (ap->a_uio != NULL)
error = uiomove(nv_value, nv_size, ap->a_uio);
if (error != 0)
return (SET_ERROR(error));
return (0);
}
/*
* Vnode operation to retrieve a named extended attribute.
*/
static int
zfs_getextattr(struct vop_getextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (SET_ERROR(error));
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname));
if (error != 0)
return (error);
error = ENOENT;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_READER);
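/*
* Prefer the SA (System Attribute) based xattr store when the dataset
* supports it; fall back to the hidden xattr directory if the
* attribute is not found there.
*/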
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_getextattr_sa(ap, attrname);
if (error == ENOENT)
error = zfs_getextattr_dir(ap, attrname);
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_deleteextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_deleteextattr_dir(struct vop_deleteextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
vnode_t *xvp = NULL, *vp;
int error;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
LOOKUP_XATTR, B_FALSE);
if (error != 0)
return (error);
NDINIT_ATVP(&nd, DELETE, NOFOLLOW | LOCKPARENT | LOCKLEAF,
UIO_SYSSPACE, attrname, xvp, td);
error = namei(&nd);
vp = nd.ni_vp;
if (error != 0) {
NDFREE(&nd, NDF_ONLY_PNBUF);
return (SET_ERROR(error));
}
error = VOP_REMOVE(nd.ni_dvp, vp, &nd.ni_cnd);
NDFREE(&nd, NDF_ONLY_PNBUF);
vput(nd.ni_dvp);
if (vp == nd.ni_dvp)
vrele(vp);
else
vput(vp);
return (error);
}
static int
zfs_deleteextattr_sa(struct vop_deleteextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
error = nvlist_remove(nvl, attrname, DATA_TYPE_BYTE_ARRAY);
if (error != 0)
error = SET_ERROR(error);
else
error = zfs_sa_set_xattr(zp);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
/*
* Vnode operation to remove a named attribute.
*/
static int
zfs_deleteextattr(struct vop_deleteextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (SET_ERROR(error));
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname));
if (error != 0)
return (error);
size_t size = 0;
struct vop_getextattr_args vga = {
.a_vp = ap->a_vp,
.a_size = &size,
.a_cred = ap->a_cred,
.a_td = ap->a_td,
};
error = ENOENT;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zfs_getextattr_sa(&vga, attrname);
if (error == 0)
error = zfs_deleteextattr_sa(ap, attrname);
}
if (error == ENOENT) {
error = zfs_getextattr_dir(&vga, attrname);
if (error == 0)
error = zfs_deleteextattr_dir(ap, attrname);
}
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
if (error == ENOENT)
error = SET_ERROR(ENOATTR);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
IN const char *a_name;
INOUT struct uio *a_uio;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_setextattr_dir(struct vop_setextattr_args *ap, const char *attrname)
{
struct thread *td = ap->a_td;
struct nameidata nd;
struct vattr va;
vnode_t *xvp = NULL, *vp;
int error, flags;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
LOOKUP_XATTR | CREATE_XATTR_DIR, B_FALSE);
if (error != 0)
return (error);
flags = FFLAGS(O_WRONLY | O_CREAT);
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW, UIO_SYSSPACE, attrname, xvp, td);
error = vn_open_cred(&nd, &flags, 0600, VN_OPEN_INVFS, ap->a_cred,
NULL);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error != 0)
return (SET_ERROR(error));
VATTR_NULL(&va);
va.va_size = 0;
error = VOP_SETATTR(vp, &va, ap->a_cred);
if (error == 0)
VOP_WRITE(vp, ap->a_uio, IO_UNIT, ap->a_cred);
VOP_UNLOCK1(vp);
vn_close(vp, flags, ap->a_cred, td);
return (error);
}
static int
zfs_setextattr_sa(struct vop_setextattr_args *ap, const char *attrname)
{
znode_t *zp = VTOZ(ap->a_vp);
nvlist_t *nvl;
size_t sa_size;
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_WRITE_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
nvl = zp->z_xattr_cached;
size_t entry_size = ap->a_uio->uio_resid;
if (entry_size > DXATTR_MAX_ENTRY_SIZE)
return (SET_ERROR(EFBIG));
error = nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
if (error != 0)
return (SET_ERROR(error));
if (sa_size > DXATTR_MAX_SA_SIZE)
return (SET_ERROR(EFBIG));
uchar_t *buf = kmem_alloc(entry_size, KM_SLEEP);
error = uiomove(buf, entry_size, ap->a_uio);
if (error != 0) {
error = SET_ERROR(error);
} else {
error = nvlist_add_byte_array(nvl, attrname, buf, entry_size);
if (error != 0)
error = SET_ERROR(error);
}
kmem_free(buf, entry_size);
if (error == 0)
error = zfs_sa_set_xattr(zp);
if (error != 0) {
zp->z_xattr_cached = NULL;
nvlist_free(nvl);
}
return (error);
}
/*
* Vnode operation to set a named attribute.
*/
static int
zfs_setextattr(struct vop_setextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrname[EXTATTR_MAXNAMELEN+1];
int error;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VWRITE);
if (error != 0)
return (SET_ERROR(error));
error = zfs_create_attrname(ap->a_attrnamespace, ap->a_name, attrname,
sizeof (attrname));
if (error != 0)
return (error);
struct vop_deleteextattr_args vda = {
.a_vp = ap->a_vp,
.a_cred = ap->a_cred,
.a_td = ap->a_td,
};
error = ENOENT;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zfsvfs->z_use_sa && zp->z_is_sa && zfsvfs->z_xattr_sa) {
error = zfs_setextattr_sa(ap, attrname);
if (error == 0)
/*
* Successfully put into SA, we need to clear the one
* in dir if present.
*/
zfs_deleteextattr_dir(&vda, attrname);
}
if (error) {
error = zfs_setextattr_dir(ap, attrname);
if (error == 0 && zp->z_is_sa)
/*
* Successfully put into dir, we need to clear the one
* in SA if present.
*/
zfs_deleteextattr_sa(&vda, attrname);
}
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
return (error);
}
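/*
 * Illustrative userland sketch: with the dataset property xattr=sa the
 * attribute written by the setextattr path above lands in the SA area and
 * any stale directory copy is cleared; with xattr=on the attribute
 * directory is used and any stale SA copy is cleared.  Either way the
 * consumer-visible interface is the same, assuming the standard FreeBSD
 * extattr(2) calls (the helper below is an example, not from this file):
 */
#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <string.h>

static void
set_user_xattr(const char *path, const char *name, const char *value)
{
	/* Creates or replaces the named attribute in the user namespace. */
	if (extattr_set_file(path, EXTATTR_NAMESPACE_USER, name,
	    value, strlen(value)) < 0)
		err(1, "extattr_set_file");
}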
#ifndef _SYS_SYSPROTO_H_
struct vop_listextattr {
IN struct vnode *a_vp;
IN int a_attrnamespace;
INOUT struct uio *a_uio;
OUT size_t *a_size;
IN struct ucred *a_cred;
IN struct thread *a_td;
};
#endif
static int
zfs_listextattr_dir(struct vop_listextattr_args *ap, const char *attrprefix)
{
struct thread *td = ap->a_td;
struct nameidata nd;
uint8_t dirbuf[sizeof (struct dirent)];
struct iovec aiov;
struct uio auio;
vnode_t *xvp = NULL, *vp;
int error, eof;
error = zfs_lookup(ap->a_vp, NULL, &xvp, NULL, 0, ap->a_cred, td,
LOOKUP_XATTR, B_FALSE);
if (error != 0) {
/*
* ENOATTR means that the EA directory does not yet exist,
* i.e. there are no extended attributes there.
*/
if (error == ENOATTR)
error = 0;
return (error);
}
NDINIT_ATVP(&nd, LOOKUP, NOFOLLOW | LOCKLEAF | LOCKSHARED,
UIO_SYSSPACE, ".", xvp, td);
error = namei(&nd);
vp = nd.ni_vp;
NDFREE(&nd, NDF_ONLY_PNBUF);
if (error != 0)
return (SET_ERROR(error));
auio.uio_iov = &aiov;
auio.uio_iovcnt = 1;
auio.uio_segflg = UIO_SYSSPACE;
auio.uio_td = td;
auio.uio_rw = UIO_READ;
auio.uio_offset = 0;
size_t plen = strlen(attrprefix);
do {
aiov.iov_base = (void *)dirbuf;
aiov.iov_len = sizeof (dirbuf);
auio.uio_resid = sizeof (dirbuf);
error = VOP_READDIR(vp, &auio, ap->a_cred, &eof, NULL, NULL);
if (error != 0)
break;
int done = sizeof (dirbuf) - auio.uio_resid;
for (int pos = 0; pos < done; ) {
struct dirent *dp = (struct dirent *)(dirbuf + pos);
pos += dp->d_reclen;
/*
* XXX: Temporarily we also accept DT_UNKNOWN, as this
* is what we get when the attribute was created on Solaris.
*/
if (dp->d_type != DT_REG && dp->d_type != DT_UNKNOWN)
continue;
else if (plen == 0 &&
strncmp(dp->d_name, "freebsd:", 8) == 0)
continue;
else if (strncmp(dp->d_name, attrprefix, plen) != 0)
continue;
uint8_t nlen = dp->d_namlen - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* Format of extattr name entry is one byte for
* length and the rest for name.
*/
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = dp->d_name + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0) {
error = SET_ERROR(error);
break;
}
}
}
} while (!eof && error == 0);
vput(vp);
return (error);
}
static int
zfs_listextattr_sa(struct vop_listextattr_args *ap, const char *attrprefix)
{
znode_t *zp = VTOZ(ap->a_vp);
int error;
error = zfs_ensure_xattr_cached(zp);
if (error != 0)
return (error);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
ASSERT3P(zp->z_xattr_cached, !=, NULL);
size_t plen = strlen(attrprefix);
nvpair_t *nvp = NULL;
while ((nvp = nvlist_next_nvpair(zp->z_xattr_cached, nvp)) != NULL) {
ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY);
const char *name = nvpair_name(nvp);
if (plen == 0 && strncmp(name, "freebsd:", 8) == 0)
continue;
else if (strncmp(name, attrprefix, plen) != 0)
continue;
uint8_t nlen = strlen(name) - plen;
if (ap->a_size != NULL) {
*ap->a_size += 1 + nlen;
} else if (ap->a_uio != NULL) {
/*
* Format of extattr name entry is one byte for
* length and the rest for name.
*/
error = uiomove(&nlen, 1, ap->a_uio);
if (error == 0) {
char *namep = __DECONST(char *, name) + plen;
error = uiomove(namep, nlen, ap->a_uio);
}
if (error != 0) {
error = SET_ERROR(error);
break;
}
}
}
return (error);
}
/*
* Vnode operation to retrieve extended attributes on a vnode.
*/
static int
zfs_listextattr(struct vop_listextattr_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
char attrprefix[16];
int error;
if (ap->a_size != NULL)
*ap->a_size = 0;
/*
* If the xattr property is off, refuse the request.
*/
if (!(zfsvfs->z_flags & ZSB_XATTR))
return (SET_ERROR(EOPNOTSUPP));
error = extattr_check_cred(ap->a_vp, ap->a_attrnamespace,
ap->a_cred, ap->a_td, VREAD);
if (error != 0)
return (SET_ERROR(error));
error = zfs_create_attrname(ap->a_attrnamespace, "", attrprefix,
sizeof (attrprefix));
if (error != 0)
return (error);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_READER);
if (zfsvfs->z_use_sa && zp->z_is_sa)
error = zfs_listextattr_sa(ap, attrprefix);
if (error == 0)
error = zfs_listextattr_dir(ap, attrprefix);
rw_exit(&zp->z_xattr_lock);
ZFS_EXIT(zfsvfs);
return (error);
}
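/*
 * Illustrative userland sketch: both listextattr paths above emit each
 * entry as a single length byte followed by that many name bytes with no
 * NUL terminator, which is the format extattr_list_file(2) hands back to
 * the caller.  A minimal walker over that buffer, assuming the "user"
 * namespace (example code, not from this file):
 */
#include <sys/types.h>
#include <sys/extattr.h>
#include <err.h>
#include <stdio.h>
#include <stdlib.h>

static void
list_user_xattrs(const char *path)
{
	/* First call sizes the name list, second call fetches it. */
	ssize_t len = extattr_list_file(path, EXTATTR_NAMESPACE_USER,
	    NULL, 0);
	if (len < 0)
		err(1, "extattr_list_file");
	unsigned char *buf = malloc(len);
	if (buf == NULL)
		err(1, "malloc");
	len = extattr_list_file(path, EXTATTR_NAMESPACE_USER, buf, len);
	if (len < 0)
		err(1, "extattr_list_file");
	for (ssize_t pos = 0; pos < len; ) {
		unsigned char nlen = buf[pos++];
		printf("%.*s\n", (int)nlen, buf + pos);
		pos += nlen;
	}
	free(buf);
}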
#ifndef _SYS_SYSPROTO_H_
struct vop_getacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_getacl(struct vop_getacl_args *ap)
{
int error;
vsecattr_t vsecattr;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
vsecattr.vsa_mask = VSA_ACE | VSA_ACECNT;
if ((error = zfs_getsecattr(VTOZ(ap->a_vp),
&vsecattr, 0, ap->a_cred)))
return (error);
error = acl_from_aces(ap->a_aclp, vsecattr.vsa_aclentp,
vsecattr.vsa_aclcnt);
if (vsecattr.vsa_aclentp != NULL)
kmem_free(vsecattr.vsa_aclentp, vsecattr.vsa_aclentsz);
return (error);
}
#ifndef _SYS_SYSPROTO_H_
struct vop_setacl_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_setacl(struct vop_setacl_args *ap)
{
int error;
vsecattr_t vsecattr;
int aclbsize; /* size of acl list in bytes */
aclent_t *aaclp;
if (ap->a_type != ACL_TYPE_NFS4)
return (EINVAL);
if (ap->a_aclp == NULL)
return (EINVAL);
if (ap->a_aclp->acl_cnt < 1 || ap->a_aclp->acl_cnt > MAX_ACL_ENTRIES)
return (EINVAL);
/*
* With NFSv4 ACLs, chmod(2) may need to add additional entries,
* splitting every entry into two and appending "canonical six"
* entries at the end. Don't allow for setting an ACL that would
* cause chmod(2) to run out of ACL entries.
*/
if (ap->a_aclp->acl_cnt * 2 + 6 > ACL_MAX_ENTRIES)
return (ENOSPC);
error = acl_nfs4_check(ap->a_aclp, ap->a_vp->v_type == VDIR);
if (error != 0)
return (error);
vsecattr.vsa_mask = VSA_ACE;
aclbsize = ap->a_aclp->acl_cnt * sizeof (ace_t);
vsecattr.vsa_aclentp = kmem_alloc(aclbsize, KM_SLEEP);
aaclp = vsecattr.vsa_aclentp;
vsecattr.vsa_aclentsz = aclbsize;
aces_from_acl(vsecattr.vsa_aclentp, &vsecattr.vsa_aclcnt, ap->a_aclp);
error = zfs_setsecattr(VTOZ(ap->a_vp), &vsecattr, 0, ap->a_cred);
kmem_free(aaclp, aclbsize);
return (error);
}
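/*
 * Worked example for the headroom check above, assuming ACL_MAX_ENTRIES
 * is 254 (its usual FreeBSD value): chmod(2) may split every entry in two
 * and append the "canonical six", so an ACL of n entries can grow to
 * 2n + 6.  The largest n with 2n + 6 <= 254 is 124, so an ACL of 125 or
 * more entries is rejected here with ENOSPC even though it would fit on
 * its own.
 */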
#ifndef _SYS_SYSPROTO_H_
struct vop_aclcheck_args {
struct vnode *vp;
acl_type_t type;
struct acl *aclp;
struct ucred *cred;
struct thread *td;
};
#endif
static int
zfs_freebsd_aclcheck(struct vop_aclcheck_args *ap)
{
return (EOPNOTSUPP);
}
static int
zfs_vptocnp(struct vop_vptocnp_args *ap)
{
vnode_t *covered_vp;
vnode_t *vp = ap->a_vp;
zfsvfs_t *zfsvfs = vp->v_vfsp->vfs_data;
znode_t *zp = VTOZ(vp);
int ltype;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/*
* If we are a snapshot mounted under .zfs, run the operation
* on the covered vnode.
*/
if (zp->z_id != zfsvfs->z_root || zfsvfs->z_parent == zfsvfs) {
char name[MAXNAMLEN + 1];
znode_t *dzp;
size_t len;
error = zfs_znode_parent_and_name(zp, &dzp, name);
if (error == 0) {
len = strlen(name);
if (*ap->a_buflen < len)
error = SET_ERROR(ENOMEM);
}
if (error == 0) {
*ap->a_buflen -= len;
bcopy(name, ap->a_buf + *ap->a_buflen, len);
*ap->a_vpp = ZTOV(dzp);
}
ZFS_EXIT(zfsvfs);
return (error);
}
ZFS_EXIT(zfsvfs);
covered_vp = vp->v_mount->mnt_vnodecovered;
#if __FreeBSD_version >= 1300045
enum vgetstate vs = vget_prep(covered_vp);
#else
vhold(covered_vp);
#endif
ltype = VOP_ISLOCKED(vp);
VOP_UNLOCK1(vp);
#if __FreeBSD_version >= 1300045
error = vget_finish(covered_vp, LK_SHARED, vs);
#else
error = vget(covered_vp, LK_SHARED | LK_VNHELD, curthread);
#endif
if (error == 0) {
#if __FreeBSD_version >= 1300123
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_buf,
ap->a_buflen);
#else
error = VOP_VPTOCNP(covered_vp, ap->a_vpp, ap->a_cred,
ap->a_buf, ap->a_buflen);
#endif
vput(covered_vp);
}
vn_lock(vp, ltype | LK_RETRY);
if (VN_IS_DOOMED(vp))
error = SET_ERROR(ENOENT);
return (error);
}
#if __FreeBSD_version >= 1400032
static int
zfs_deallocate(struct vop_deallocate_args *ap)
{
znode_t *zp = VTOZ(ap->a_vp);
zfsvfs_t *zfsvfs = zp->z_zfsvfs;
zilog_t *zilog;
off_t off, len, file_sz;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
zilog = zfsvfs->z_log;
off = *ap->a_offset;
len = *ap->a_len;
file_sz = zp->z_size;
if (off + len > file_sz)
len = file_sz - off;
/* Fast path for out-of-range request. */
if (len <= 0) {
*ap->a_len = 0;
ZFS_EXIT(zfsvfs);
return (0);
}
error = zfs_freesp(zp, off, len, O_RDWR, TRUE);
if (error == 0) {
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS ||
(ap->a_ioflag & IO_SYNC) != 0)
zil_commit(zilog, zp->z_id);
*ap->a_offset = off + len;
*ap->a_len = 0;
}
ZFS_EXIT(zfsvfs);
return (error);
}
#endif
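/*
 * Illustrative userland sketch: VOP_DEALLOCATE is the backend for
 * fspacectl(2) with SPACECTL_DEALLOC, and on ZFS the range is turned into
 * a hole via zfs_freesp().  A minimal hole-punching helper, assuming the
 * interface as shipped with FreeBSD 14 (example code, not from this file):
 */
#include <sys/types.h>
#include <fcntl.h>
#include <err.h>

static void
punch_hole(int fd, off_t off, off_t len)
{
	struct spacectl_range rqsr = { .r_offset = off, .r_len = len };

	/* A NULL result range means we do not care how much remains. */
	if (fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, NULL) != 0)
		err(1, "fspacectl");
}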
struct vop_vector zfs_vnodeops;
struct vop_vector zfs_fifoops;
struct vop_vector zfs_shareops;
struct vop_vector zfs_vnodeops = {
.vop_default = &default_vnodeops,
.vop_inactive = zfs_freebsd_inactive,
#if __FreeBSD_version >= 1300042
.vop_need_inactive = zfs_freebsd_need_inactive,
#endif
.vop_reclaim = zfs_freebsd_reclaim,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_allocate = VOP_EINVAL,
#if __FreeBSD_version >= 1400032
.vop_deallocate = zfs_deallocate,
#endif
.vop_lookup = zfs_cache_lookup,
.vop_cachedlookup = zfs_freebsd_cachedlookup,
.vop_getattr = zfs_freebsd_getattr,
.vop_setattr = zfs_freebsd_setattr,
.vop_create = zfs_freebsd_create,
.vop_mknod = (vop_mknod_t *)zfs_freebsd_create,
.vop_mkdir = zfs_freebsd_mkdir,
.vop_readdir = zfs_freebsd_readdir,
.vop_fsync = zfs_freebsd_fsync,
.vop_open = zfs_freebsd_open,
.vop_close = zfs_freebsd_close,
.vop_rmdir = zfs_freebsd_rmdir,
.vop_ioctl = zfs_freebsd_ioctl,
.vop_link = zfs_freebsd_link,
.vop_symlink = zfs_freebsd_symlink,
.vop_readlink = zfs_freebsd_readlink,
.vop_read = zfs_freebsd_read,
.vop_write = zfs_freebsd_write,
.vop_remove = zfs_freebsd_remove,
.vop_rename = zfs_freebsd_rename,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_bmap = zfs_freebsd_bmap,
.vop_fid = zfs_freebsd_fid,
.vop_getextattr = zfs_getextattr,
.vop_deleteextattr = zfs_deleteextattr,
.vop_setextattr = zfs_setextattr,
.vop_listextattr = zfs_listextattr,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
.vop_getpages = zfs_freebsd_getpages,
.vop_putpages = zfs_freebsd_putpages,
.vop_vptocnp = zfs_vptocnp,
#if __FreeBSD_version >= 1300064
.vop_lock1 = vop_lock,
.vop_unlock = vop_unlock,
.vop_islocked = vop_islocked,
#endif
};
VFS_VOP_VECTOR_REGISTER(zfs_vnodeops);
struct vop_vector zfs_fifoops = {
.vop_default = &fifo_specops,
.vop_fsync = zfs_freebsd_fsync,
#if __FreeBSD_version >= 1300102
.vop_fplookup_vexec = zfs_freebsd_fplookup_vexec,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = zfs_freebsd_fplookup_symlink,
#endif
.vop_access = zfs_freebsd_access,
.vop_getattr = zfs_freebsd_getattr,
.vop_inactive = zfs_freebsd_inactive,
.vop_read = VOP_PANIC,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_setattr = zfs_freebsd_setattr,
.vop_write = VOP_PANIC,
.vop_pathconf = zfs_freebsd_pathconf,
.vop_fid = zfs_freebsd_fid,
.vop_getacl = zfs_freebsd_getacl,
.vop_setacl = zfs_freebsd_setacl,
.vop_aclcheck = zfs_freebsd_aclcheck,
};
VFS_VOP_VECTOR_REGISTER(zfs_fifoops);
/*
* special share hidden files vnode operations template
*/
struct vop_vector zfs_shareops = {
.vop_default = &default_vnodeops,
#if __FreeBSD_version >= 1300121
.vop_fplookup_vexec = VOP_EAGAIN,
#endif
#if __FreeBSD_version >= 1300139
.vop_fplookup_symlink = VOP_EAGAIN,
#endif
.vop_access = zfs_freebsd_access,
.vop_inactive = zfs_freebsd_inactive,
.vop_reclaim = zfs_freebsd_reclaim,
.vop_fid = zfs_freebsd_fid,
.vop_pathconf = zfs_freebsd_pathconf,
};
VFS_VOP_VECTOR_REGISTER(zfs_shareops);
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
index 1233c32deac1..cf37aecf8a22 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_acl.c
@@ -1,2948 +1,2948 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013 by Delphix. All rights reserved.
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/sid.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/fs/zfs.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_quota.h>
#include <sys/zfs_vfsops.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/trace_acl.h>
#include <sys/zpl.h>
#define ALLOW ACE_ACCESS_ALLOWED_ACE_TYPE
#define DENY ACE_ACCESS_DENIED_ACE_TYPE
#define MAX_ACE_TYPE ACE_SYSTEM_ALARM_CALLBACK_OBJECT_ACE_TYPE
#define MIN_ACE_TYPE ALLOW
#define OWNING_GROUP (ACE_GROUP|ACE_IDENTIFIER_GROUP)
#define EVERYONE_ALLOW_MASK (ACE_READ_ACL|ACE_READ_ATTRIBUTES | \
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE)
#define EVERYONE_DENY_MASK (ACE_WRITE_ACL|ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define OWNER_ALLOW_MASK (ACE_WRITE_ACL | ACE_WRITE_OWNER | \
ACE_WRITE_ATTRIBUTES|ACE_WRITE_NAMED_ATTRS)
#define ZFS_CHECKED_MASKS (ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_DATA| \
ACE_READ_NAMED_ATTRS|ACE_WRITE_DATA|ACE_WRITE_ATTRIBUTES| \
ACE_WRITE_NAMED_ATTRS|ACE_APPEND_DATA|ACE_EXECUTE|ACE_WRITE_OWNER| \
ACE_WRITE_ACL|ACE_DELETE|ACE_DELETE_CHILD|ACE_SYNCHRONIZE)
#define WRITE_MASK_DATA (ACE_WRITE_DATA|ACE_APPEND_DATA|ACE_WRITE_NAMED_ATTRS)
#define WRITE_MASK_ATTRS (ACE_WRITE_ACL|ACE_WRITE_OWNER|ACE_WRITE_ATTRIBUTES| \
ACE_DELETE|ACE_DELETE_CHILD)
#define WRITE_MASK (WRITE_MASK_DATA|WRITE_MASK_ATTRS)
#define OGE_CLEAR (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define OKAY_MASK_BITS (ACE_READ_DATA|ACE_LIST_DIRECTORY|ACE_WRITE_DATA| \
ACE_ADD_FILE|ACE_APPEND_DATA|ACE_ADD_SUBDIRECTORY|ACE_EXECUTE)
#define ALL_INHERIT (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE | \
ACE_NO_PROPAGATE_INHERIT_ACE|ACE_INHERIT_ONLY_ACE|ACE_INHERITED_ACE)
#define RESTRICTED_CLEAR (ACE_WRITE_ACL|ACE_WRITE_OWNER)
#define V4_ACL_WIDE_FLAGS (ZFS_ACL_AUTO_INHERIT|ZFS_ACL_DEFAULTED|\
ZFS_ACL_PROTECTED)
#define ZFS_ACL_WIDE_FLAGS (V4_ACL_WIDE_FLAGS|ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|\
ZFS_ACL_OBJ_ACE)
#define ALL_MODE_EXECS (S_IXUSR | S_IXGRP | S_IXOTH)
#define IDMAP_WK_CREATOR_OWNER_UID 2147483648U
static uint16_t
zfs_ace_v0_get_type(void *acep)
{
return (((zfs_oldace_t *)acep)->z_type);
}
static uint16_t
zfs_ace_v0_get_flags(void *acep)
{
return (((zfs_oldace_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_v0_get_mask(void *acep)
{
return (((zfs_oldace_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_v0_get_who(void *acep)
{
return (((zfs_oldace_t *)acep)->z_fuid);
}
static void
zfs_ace_v0_set_type(void *acep, uint16_t type)
{
((zfs_oldace_t *)acep)->z_type = type;
}
static void
zfs_ace_v0_set_flags(void *acep, uint16_t flags)
{
((zfs_oldace_t *)acep)->z_flags = flags;
}
static void
zfs_ace_v0_set_mask(void *acep, uint32_t mask)
{
((zfs_oldace_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_v0_set_who(void *acep, uint64_t who)
{
((zfs_oldace_t *)acep)->z_fuid = who;
}
/*ARGSUSED*/
static size_t
zfs_ace_v0_size(void *acep)
{
return (sizeof (zfs_oldace_t));
}
static size_t
zfs_ace_v0_abstract_size(void)
{
return (sizeof (zfs_oldace_t));
}
static int
zfs_ace_v0_mask_off(void)
{
return (offsetof(zfs_oldace_t, z_access_mask));
}
/*ARGSUSED*/
static int
zfs_ace_v0_data(void *acep, void **datap)
{
*datap = NULL;
return (0);
}
static acl_ops_t zfs_acl_v0_ops = {
.ace_mask_get = zfs_ace_v0_get_mask,
.ace_mask_set = zfs_ace_v0_set_mask,
.ace_flags_get = zfs_ace_v0_get_flags,
.ace_flags_set = zfs_ace_v0_set_flags,
.ace_type_get = zfs_ace_v0_get_type,
.ace_type_set = zfs_ace_v0_set_type,
.ace_who_get = zfs_ace_v0_get_who,
.ace_who_set = zfs_ace_v0_set_who,
.ace_size = zfs_ace_v0_size,
.ace_abstract_size = zfs_ace_v0_abstract_size,
.ace_mask_off = zfs_ace_v0_mask_off,
.ace_data = zfs_ace_v0_data
};
static uint16_t
zfs_ace_fuid_get_type(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_type);
}
static uint16_t
zfs_ace_fuid_get_flags(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_flags);
}
static uint32_t
zfs_ace_fuid_get_mask(void *acep)
{
return (((zfs_ace_hdr_t *)acep)->z_access_mask);
}
static uint64_t
zfs_ace_fuid_get_who(void *args)
{
uint16_t entry_type;
zfs_ace_t *acep = args;
entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (-1);
return (((zfs_ace_t *)acep)->z_fuid);
}
static void
zfs_ace_fuid_set_type(void *acep, uint16_t type)
{
((zfs_ace_hdr_t *)acep)->z_type = type;
}
static void
zfs_ace_fuid_set_flags(void *acep, uint16_t flags)
{
((zfs_ace_hdr_t *)acep)->z_flags = flags;
}
static void
zfs_ace_fuid_set_mask(void *acep, uint32_t mask)
{
((zfs_ace_hdr_t *)acep)->z_access_mask = mask;
}
static void
zfs_ace_fuid_set_who(void *arg, uint64_t who)
{
zfs_ace_t *acep = arg;
uint16_t entry_type = acep->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type == ACE_OWNER || entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return;
acep->z_fuid = who;
}
static size_t
zfs_ace_fuid_size(void *acep)
{
zfs_ace_hdr_t *zacep = acep;
uint16_t entry_type;
switch (zacep->z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
return (sizeof (zfs_object_ace_t));
case ALLOW:
case DENY:
entry_type =
(((zfs_ace_hdr_t *)acep)->z_flags & ACE_TYPE_FLAGS);
if (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)
return (sizeof (zfs_ace_hdr_t));
- /* FALLTHROUGH */
+ fallthrough;
default:
return (sizeof (zfs_ace_t));
}
}
static size_t
zfs_ace_fuid_abstract_size(void)
{
return (sizeof (zfs_ace_hdr_t));
}
static int
zfs_ace_fuid_mask_off(void)
{
return (offsetof(zfs_ace_hdr_t, z_access_mask));
}
static int
zfs_ace_fuid_data(void *acep, void **datap)
{
zfs_ace_t *zacep = acep;
zfs_object_ace_t *zobjp;
switch (zacep->z_hdr.z_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjp = acep;
*datap = (caddr_t)zobjp + sizeof (zfs_ace_t);
return (sizeof (zfs_object_ace_t) - sizeof (zfs_ace_t));
default:
*datap = NULL;
return (0);
}
}
static acl_ops_t zfs_acl_fuid_ops = {
.ace_mask_get = zfs_ace_fuid_get_mask,
.ace_mask_set = zfs_ace_fuid_set_mask,
.ace_flags_get = zfs_ace_fuid_get_flags,
.ace_flags_set = zfs_ace_fuid_set_flags,
.ace_type_get = zfs_ace_fuid_get_type,
.ace_type_set = zfs_ace_fuid_set_type,
.ace_who_get = zfs_ace_fuid_get_who,
.ace_who_set = zfs_ace_fuid_set_who,
.ace_size = zfs_ace_fuid_size,
.ace_abstract_size = zfs_ace_fuid_abstract_size,
.ace_mask_off = zfs_ace_fuid_mask_off,
.ace_data = zfs_ace_fuid_data
};
/*
* The following three functions are provided for compatibility with
* older ZPL versions in order to determine if the file used to have
* an external ACL and what version of ACL previously existed on the
* file. Would really be nice to not need this, sigh.
*/
uint64_t
zfs_external_acl(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
int error;
if (zp->z_is_sa)
return (0);
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_extern_obj);
else {
/*
* after upgrade the SA_ZPL_ZNODE_ACL should have been
* removed
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (0);
}
}
/*
* Determine size of ACL in bytes
*
* This is more complicated than it should be since we have to deal
* with old external ACLs.
*/
static int
zfs_acl_znode_info(znode_t *zp, int *aclsize, int *aclcount,
zfs_acl_phys_t *aclphys)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t acl_count;
int size;
int error;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_is_sa) {
if ((error = sa_size(zp->z_sa_hdl, SA_ZPL_DACL_ACES(zfsvfs),
&size)) != 0)
return (error);
*aclsize = size;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_COUNT(zfsvfs),
&acl_count, sizeof (acl_count))) != 0)
return (error);
*aclcount = acl_count;
} else {
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
aclphys, sizeof (*aclphys))) != 0)
return (error);
if (aclphys->z_acl_version == ZFS_ACL_VERSION_INITIAL) {
*aclsize = ZFS_ACL_SIZE(aclphys->z_acl_size);
*aclcount = aclphys->z_acl_size;
} else {
*aclsize = aclphys->z_acl_size;
*aclcount = aclphys->z_acl_count;
}
}
return (0);
}
int
zfs_znode_acl_version(znode_t *zp)
{
zfs_acl_phys_t acl_phys;
if (zp->z_is_sa)
return (ZFS_ACL_VERSION_FUID);
else {
int error;
/*
* Need to deal with a potential
* race where zfs_sa_upgrade could cause
* z_is_sa to change.
*
* If the lookup fails then the state of z_is_sa should have
* changed.
*/
if ((error = sa_lookup(zp->z_sa_hdl,
SA_ZPL_ZNODE_ACL(ZTOZSB(zp)),
&acl_phys, sizeof (acl_phys))) == 0)
return (acl_phys.z_acl_version);
else {
/*
* After upgrade SA_ZPL_ZNODE_ACL should have
* been removed.
*/
VERIFY(zp->z_is_sa && error == ENOENT);
return (ZFS_ACL_VERSION_FUID);
}
}
}
static int
zfs_acl_version(int version)
{
if (version < ZPL_VERSION_FUID)
return (ZFS_ACL_VERSION_INITIAL);
else
return (ZFS_ACL_VERSION_FUID);
}
static int
zfs_acl_version_zp(znode_t *zp)
{
return (zfs_acl_version(ZTOZSB(zp)->z_version));
}
zfs_acl_t *
zfs_acl_alloc(int vers)
{
zfs_acl_t *aclp;
aclp = kmem_zalloc(sizeof (zfs_acl_t), KM_SLEEP);
list_create(&aclp->z_acl, sizeof (zfs_acl_node_t),
offsetof(zfs_acl_node_t, z_next));
aclp->z_version = vers;
if (vers == ZFS_ACL_VERSION_FUID)
aclp->z_ops = &zfs_acl_fuid_ops;
else
aclp->z_ops = &zfs_acl_v0_ops;
return (aclp);
}
zfs_acl_node_t *
zfs_acl_node_alloc(size_t bytes)
{
zfs_acl_node_t *aclnode;
aclnode = kmem_zalloc(sizeof (zfs_acl_node_t), KM_SLEEP);
if (bytes) {
aclnode->z_acldata = kmem_alloc(bytes, KM_SLEEP);
aclnode->z_allocdata = aclnode->z_acldata;
aclnode->z_allocsize = bytes;
aclnode->z_size = bytes;
}
return (aclnode);
}
static void
zfs_acl_node_free(zfs_acl_node_t *aclnode)
{
if (aclnode->z_allocsize)
kmem_free(aclnode->z_allocdata, aclnode->z_allocsize);
kmem_free(aclnode, sizeof (zfs_acl_node_t));
}
static void
zfs_acl_release_nodes(zfs_acl_t *aclp)
{
zfs_acl_node_t *aclnode;
while ((aclnode = list_head(&aclp->z_acl))) {
list_remove(&aclp->z_acl, aclnode);
zfs_acl_node_free(aclnode);
}
aclp->z_acl_count = 0;
aclp->z_acl_bytes = 0;
}
void
zfs_acl_free(zfs_acl_t *aclp)
{
zfs_acl_release_nodes(aclp);
list_destroy(&aclp->z_acl);
kmem_free(aclp, sizeof (zfs_acl_t));
}
static boolean_t
zfs_acl_valid_ace_type(uint_t type, uint_t flags)
{
uint16_t entry_type;
switch (type) {
case ALLOW:
case DENY:
case ACE_SYSTEM_AUDIT_ACE_TYPE:
case ACE_SYSTEM_ALARM_ACE_TYPE:
entry_type = flags & ACE_TYPE_FLAGS;
return (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE || entry_type == 0 ||
entry_type == ACE_IDENTIFIER_GROUP);
default:
if (type >= MIN_ACE_TYPE && type <= MAX_ACE_TYPE)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
zfs_ace_valid(umode_t obj_mode, zfs_acl_t *aclp, uint16_t type, uint16_t iflags)
{
/*
* first check type of entry
*/
if (!zfs_acl_valid_ace_type(type, iflags))
return (B_FALSE);
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (aclp->z_version < ZFS_ACL_VERSION_FUID)
return (B_FALSE);
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
}
/*
* next check inheritance level flags
*/
if (S_ISDIR(obj_mode) &&
(iflags & (ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if (iflags & (ACE_INHERIT_ONLY_ACE|ACE_NO_PROPAGATE_INHERIT_ACE)) {
if ((iflags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE)) == 0) {
return (B_FALSE);
}
}
return (B_TRUE);
}
static void *
zfs_acl_next_ace(zfs_acl_t *aclp, void *start, uint64_t *who,
uint32_t *access_mask, uint16_t *iflags, uint16_t *type)
{
zfs_acl_node_t *aclnode;
ASSERT(aclp);
if (start == NULL) {
aclnode = list_head(&aclp->z_acl);
if (aclnode == NULL)
return (NULL);
aclp->z_next_ace = aclnode->z_acldata;
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
}
aclnode = aclp->z_curr_node;
if (aclnode == NULL)
return (NULL);
if (aclnode->z_ace_idx >= aclnode->z_ace_count) {
aclnode = list_next(&aclp->z_acl, aclnode);
if (aclnode == NULL)
return (NULL);
else {
aclp->z_curr_node = aclnode;
aclnode->z_ace_idx = 0;
aclp->z_next_ace = aclnode->z_acldata;
}
}
if (aclnode->z_ace_idx < aclnode->z_ace_count) {
void *acep = aclp->z_next_ace;
size_t ace_size;
/*
* Make sure we don't overstep our bounds
*/
ace_size = aclp->z_ops->ace_size(acep);
if (((caddr_t)acep + ace_size) >
((caddr_t)aclnode->z_acldata + aclnode->z_size)) {
return (NULL);
}
*iflags = aclp->z_ops->ace_flags_get(acep);
*type = aclp->z_ops->ace_type_get(acep);
*access_mask = aclp->z_ops->ace_mask_get(acep);
*who = aclp->z_ops->ace_who_get(acep);
aclp->z_next_ace = (caddr_t)aclp->z_next_ace + ace_size;
aclnode->z_ace_idx++;
return ((void *)acep);
}
return (NULL);
}
/*ARGSUSED*/
static uint64_t
zfs_ace_walk(void *datap, uint64_t cookie, int aclcnt,
uint16_t *flags, uint16_t *type, uint32_t *mask)
{
zfs_acl_t *aclp = datap;
zfs_ace_hdr_t *acep = (zfs_ace_hdr_t *)(uintptr_t)cookie;
uint64_t who;
acep = zfs_acl_next_ace(aclp, acep, &who, mask,
flags, type);
return ((uint64_t)(uintptr_t)acep);
}
/*
* Copy ACE to internal ZFS format.
* While processing the ACL each ACE will be validated for correctness.
* ACE FUIDs will be created later.
*/
static int
zfs_copy_ace_2_fuid(zfsvfs_t *zfsvfs, umode_t obj_mode, zfs_acl_t *aclp,
void *datap, zfs_ace_t *z_acl, uint64_t aclcnt, size_t *size,
zfs_fuid_info_t **fuidp, cred_t *cr)
{
int i;
uint16_t entry_type;
zfs_ace_t *aceptr = z_acl;
ace_t *acep = datap;
zfs_object_ace_t *zobjacep;
ace_object_t *aceobjp;
for (i = 0; i != aclcnt; i++) {
aceptr->z_hdr.z_access_mask = acep->a_access_mask;
aceptr->z_hdr.z_flags = acep->a_flags;
aceptr->z_hdr.z_type = acep->a_type;
entry_type = aceptr->z_hdr.z_flags & ACE_TYPE_FLAGS;
if (entry_type != ACE_OWNER && entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE) {
aceptr->z_fuid = zfs_fuid_create(zfsvfs, acep->a_who,
cr, (entry_type == 0) ?
ZFS_ACE_USER : ZFS_ACE_GROUP, fuidp);
}
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_hdr.z_type,
aceptr->z_hdr.z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
switch (acep->a_type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
zobjacep = (zfs_object_ace_t *)aceptr;
aceobjp = (ace_object_t *)acep;
bcopy(aceobjp->a_obj_type, zobjacep->z_object_type,
sizeof (aceobjp->a_obj_type));
bcopy(aceobjp->a_inherit_obj_type,
zobjacep->z_inherit_type,
sizeof (aceobjp->a_inherit_obj_type));
acep = (ace_t *)((caddr_t)acep + sizeof (ace_object_t));
break;
default:
acep = (ace_t *)((caddr_t)acep + sizeof (ace_t));
}
aceptr = (zfs_ace_t *)((caddr_t)aceptr +
aclp->z_ops->ace_size(aceptr));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* Copy ZFS ACEs to fixed size ace_t layout
*/
static void
zfs_copy_fuid_2_ace(zfsvfs_t *zfsvfs, zfs_acl_t *aclp, cred_t *cr,
void *datap, int filter)
{
uint64_t who;
uint32_t access_mask;
uint16_t iflags, type;
zfs_ace_hdr_t *zacep = NULL;
ace_t *acep = datap;
ace_object_t *objacep;
zfs_object_ace_t *zobjacep;
size_t ace_size;
uint16_t entry_type;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
if (filter) {
continue;
}
zobjacep = (zfs_object_ace_t *)zacep;
objacep = (ace_object_t *)acep;
bcopy(zobjacep->z_object_type,
objacep->a_obj_type,
sizeof (zobjacep->z_object_type));
bcopy(zobjacep->z_inherit_type,
objacep->a_inherit_obj_type,
sizeof (zobjacep->z_inherit_type));
ace_size = sizeof (ace_object_t);
break;
default:
ace_size = sizeof (ace_t);
break;
}
entry_type = (iflags & ACE_TYPE_FLAGS);
if ((entry_type != ACE_OWNER &&
entry_type != OWNING_GROUP &&
entry_type != ACE_EVERYONE)) {
acep->a_who = zfs_fuid_map_id(zfsvfs, who,
cr, (entry_type & ACE_IDENTIFIER_GROUP) ?
ZFS_ACE_GROUP : ZFS_ACE_USER);
} else {
acep->a_who = (uid_t)(int64_t)who;
}
acep->a_access_mask = access_mask;
acep->a_flags = iflags;
acep->a_type = type;
acep = (ace_t *)((caddr_t)acep + ace_size);
}
}
static int
zfs_copy_ace_2_oldace(umode_t obj_mode, zfs_acl_t *aclp, ace_t *acep,
zfs_oldace_t *z_acl, int aclcnt, size_t *size)
{
int i;
zfs_oldace_t *aceptr = z_acl;
for (i = 0; i != aclcnt; i++, aceptr++) {
aceptr->z_access_mask = acep[i].a_access_mask;
aceptr->z_type = acep[i].a_type;
aceptr->z_flags = acep[i].a_flags;
aceptr->z_fuid = acep[i].a_who;
/*
* Make sure ACE is valid
*/
if (zfs_ace_valid(obj_mode, aclp, aceptr->z_type,
aceptr->z_flags) != B_TRUE)
return (SET_ERROR(EINVAL));
}
*size = (caddr_t)aceptr - (caddr_t)z_acl;
return (0);
}
/*
* convert old ACL format to new
*/
void
zfs_acl_xform(znode_t *zp, zfs_acl_t *aclp, cred_t *cr)
{
zfs_oldace_t *oldaclp;
int i;
uint16_t type, iflags;
uint32_t access_mask;
uint64_t who;
void *cookie = NULL;
zfs_acl_node_t *newaclnode;
ASSERT(aclp->z_version == ZFS_ACL_VERSION_INITIAL);
/*
* First create the ACE in a contiguous piece of memory
* for zfs_copy_ace_2_fuid().
*
* We only convert an ACL once, so this won't happen
* every time.
*/
oldaclp = kmem_alloc(sizeof (zfs_oldace_t) * aclp->z_acl_count,
KM_SLEEP);
i = 0;
while ((cookie = zfs_acl_next_ace(aclp, cookie, &who,
&access_mask, &iflags, &type))) {
oldaclp[i].z_flags = iflags;
oldaclp[i].z_type = type;
oldaclp[i].z_fuid = who;
oldaclp[i++].z_access_mask = access_mask;
}
newaclnode = zfs_acl_node_alloc(aclp->z_acl_count *
sizeof (zfs_object_ace_t));
aclp->z_ops = &zfs_acl_fuid_ops;
VERIFY(zfs_copy_ace_2_fuid(ZTOZSB(zp), ZTOI(zp)->i_mode,
aclp, oldaclp, newaclnode->z_acldata, aclp->z_acl_count,
&newaclnode->z_size, NULL, cr) == 0);
newaclnode->z_ace_count = aclp->z_acl_count;
aclp->z_version = ZFS_ACL_VERSION;
kmem_free(oldaclp, aclp->z_acl_count * sizeof (zfs_oldace_t));
/*
* Release all previous ACL nodes
*/
zfs_acl_release_nodes(aclp);
list_insert_head(&aclp->z_acl, newaclnode);
aclp->z_acl_bytes = newaclnode->z_size;
aclp->z_acl_count = newaclnode->z_ace_count;
}
/*
* Convert unix access mask to v4 access mask
*/
static uint32_t
zfs_unix_to_v4(uint32_t access_mask)
{
uint32_t new_mask = 0;
if (access_mask & S_IXOTH)
new_mask |= ACE_EXECUTE;
if (access_mask & S_IWOTH)
new_mask |= ACE_WRITE_DATA;
if (access_mask & S_IROTH)
new_mask |= ACE_READ_DATA;
return (new_mask);
}
static void
zfs_set_ace(zfs_acl_t *aclp, void *acep, uint32_t access_mask,
uint16_t access_type, uint64_t fuid, uint16_t entry_type)
{
uint16_t type = entry_type & ACE_TYPE_FLAGS;
aclp->z_ops->ace_mask_set(acep, access_mask);
aclp->z_ops->ace_type_set(acep, access_type);
aclp->z_ops->ace_flags_set(acep, entry_type);
if ((type != ACE_OWNER && type != OWNING_GROUP &&
type != ACE_EVERYONE))
aclp->z_ops->ace_who_set(acep, fuid);
}
/*
* Determine mode of file based on ACL.
*/
uint64_t
zfs_mode_compute(uint64_t fmode, zfs_acl_t *aclp,
uint64_t *pflags, uint64_t fuid, uint64_t fgid)
{
int entry_type;
mode_t mode;
mode_t seen = 0;
zfs_ace_hdr_t *acep = NULL;
uint64_t who;
uint16_t iflags, type;
uint32_t access_mask;
boolean_t an_exec_denied = B_FALSE;
mode = (fmode & (S_IFMT | S_ISUID | S_ISGID | S_ISVTX));
while ((acep = zfs_acl_next_ace(aclp, acep, &who,
&access_mask, &iflags, &type))) {
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* Skip over any inherit_only ACEs
*/
if (iflags & ACE_INHERIT_ONLY_ACE)
continue;
if (entry_type == ACE_OWNER || (entry_type == 0 &&
who == fuid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRUSR))) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWUSR))) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXUSR))) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
} else if (entry_type == OWNING_GROUP ||
(entry_type == ACE_IDENTIFIER_GROUP && who == fgid)) {
if ((access_mask & ACE_READ_DATA) &&
(!(seen & S_IRGRP))) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if ((access_mask & ACE_WRITE_DATA) &&
(!(seen & S_IWGRP))) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if ((access_mask & ACE_EXECUTE) &&
(!(seen & S_IXGRP))) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
} else if (entry_type == ACE_EVERYONE) {
if ((access_mask & ACE_READ_DATA)) {
if (!(seen & S_IRUSR)) {
seen |= S_IRUSR;
if (type == ALLOW) {
mode |= S_IRUSR;
}
}
if (!(seen & S_IRGRP)) {
seen |= S_IRGRP;
if (type == ALLOW) {
mode |= S_IRGRP;
}
}
if (!(seen & S_IROTH)) {
seen |= S_IROTH;
if (type == ALLOW) {
mode |= S_IROTH;
}
}
}
if ((access_mask & ACE_WRITE_DATA)) {
if (!(seen & S_IWUSR)) {
seen |= S_IWUSR;
if (type == ALLOW) {
mode |= S_IWUSR;
}
}
if (!(seen & S_IWGRP)) {
seen |= S_IWGRP;
if (type == ALLOW) {
mode |= S_IWGRP;
}
}
if (!(seen & S_IWOTH)) {
seen |= S_IWOTH;
if (type == ALLOW) {
mode |= S_IWOTH;
}
}
}
if ((access_mask & ACE_EXECUTE)) {
if (!(seen & S_IXUSR)) {
seen |= S_IXUSR;
if (type == ALLOW) {
mode |= S_IXUSR;
}
}
if (!(seen & S_IXGRP)) {
seen |= S_IXGRP;
if (type == ALLOW) {
mode |= S_IXGRP;
}
}
if (!(seen & S_IXOTH)) {
seen |= S_IXOTH;
if (type == ALLOW) {
mode |= S_IXOTH;
}
}
}
} else {
/*
* Only care if this IDENTIFIER_GROUP or
* USER ACE denies execute access to someone;
* the mode is not affected.
*/
if ((access_mask & ACE_EXECUTE) && type == DENY)
an_exec_denied = B_TRUE;
}
}
/*
* Failure to allow is effectively a deny, so execute permission
* is denied if it was never mentioned or if we explicitly
* weren't allowed it.
*/
if (!an_exec_denied &&
((seen & ALL_MODE_EXECS) != ALL_MODE_EXECS ||
(mode & ALL_MODE_EXECS) != ALL_MODE_EXECS))
an_exec_denied = B_TRUE;
if (an_exec_denied)
*pflags &= ~ZFS_NO_EXECS_DENIED;
else
*pflags |= ZFS_NO_EXECS_DENIED;
return (mode);
}
/*
* Read an external acl object. If the intent is to modify, always
* create a new acl and leave any cached acl in place.
*/
int
zfs_acl_node_read(struct znode *zp, boolean_t have_lock, zfs_acl_t **aclpp,
boolean_t will_modify)
{
zfs_acl_t *aclp;
int aclsize = 0;
int acl_count = 0;
zfs_acl_node_t *aclnode;
zfs_acl_phys_t znode_acl;
int version;
int error;
boolean_t drop_lock = B_FALSE;
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
if (zp->z_acl_cached && !will_modify) {
*aclpp = zp->z_acl_cached;
return (0);
}
/*
* Close a race where the znode could be upgraded while trying to
* read the znode attributes.
*
* But this could only happen if the file isn't already an SA
* znode.
*/
if (!zp->z_is_sa && !have_lock) {
mutex_enter(&zp->z_lock);
drop_lock = B_TRUE;
}
version = zfs_znode_acl_version(zp);
if ((error = zfs_acl_znode_info(zp, &aclsize,
&acl_count, &znode_acl)) != 0) {
goto done;
}
aclp = zfs_acl_alloc(version);
aclp->z_acl_count = acl_count;
aclp->z_acl_bytes = aclsize;
aclnode = zfs_acl_node_alloc(aclsize);
aclnode->z_ace_count = aclp->z_acl_count;
aclnode->z_size = aclsize;
if (!zp->z_is_sa) {
if (znode_acl.z_acl_extern_obj) {
error = dmu_read(ZTOZSB(zp)->z_os,
znode_acl.z_acl_extern_obj, 0, aclnode->z_size,
aclnode->z_acldata, DMU_READ_PREFETCH);
} else {
bcopy(znode_acl.z_ace_data, aclnode->z_acldata,
aclnode->z_size);
}
} else {
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_DACL_ACES(ZTOZSB(zp)),
aclnode->z_acldata, aclnode->z_size);
}
if (error != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
goto done;
}
list_insert_head(&aclp->z_acl, aclnode);
*aclpp = aclp;
if (!will_modify)
zp->z_acl_cached = aclp;
done:
if (drop_lock)
mutex_exit(&zp->z_lock);
return (error);
}
/*ARGSUSED*/
void
zfs_acl_data_locator(void **dataptr, uint32_t *length, uint32_t buflen,
boolean_t start, void *userdata)
{
zfs_acl_locator_cb_t *cb = (zfs_acl_locator_cb_t *)userdata;
if (start) {
cb->cb_acl_node = list_head(&cb->cb_aclp->z_acl);
} else {
cb->cb_acl_node = list_next(&cb->cb_aclp->z_acl,
cb->cb_acl_node);
}
*dataptr = cb->cb_acl_node->z_acldata;
*length = cb->cb_acl_node->z_size;
}
int
zfs_acl_chown_setattr(znode_t *zp)
{
int error;
zfs_acl_t *aclp;
if (ZTOZSB(zp)->z_acl_type == ZFS_ACLTYPE_POSIX)
return (0);
ASSERT(MUTEX_HELD(&zp->z_lock));
ASSERT(MUTEX_HELD(&zp->z_acl_lock));
error = zfs_acl_node_read(zp, B_TRUE, &aclp, B_FALSE);
if (error == 0 && aclp->z_acl_count > 0)
zp->z_mode = ZTOI(zp)->i_mode =
zfs_mode_compute(zp->z_mode, aclp,
&zp->z_pflags, KUID_TO_SUID(ZTOI(zp)->i_uid),
KGID_TO_SGID(ZTOI(zp)->i_gid));
/*
* Some ZFS implementations (ZEVO) create neither a ZNODE_ACL
* nor a DACL_ACES SA in which case ENOENT is returned from
* zfs_acl_node_read() when the SA can't be located.
* Allow chown/chgrp to succeed in these cases rather than
* returning an error that makes no sense in the context of
* the caller.
*/
if (error == ENOENT)
return (0);
return (error);
}
typedef struct trivial_acl {
uint32_t allow0; /* allow mask for bits only in owner */
uint32_t deny1; /* deny mask for bits not in owner */
uint32_t deny2; /* deny mask for bits not in group */
uint32_t owner; /* allow mask matching mode */
uint32_t group; /* allow mask matching mode */
uint32_t everyone; /* allow mask matching mode */
} trivial_acl_t;
static void
acl_trivial_access_masks(mode_t mode, boolean_t isdir, trivial_acl_t *masks)
{
uint32_t read_mask = ACE_READ_DATA;
uint32_t write_mask = ACE_WRITE_DATA|ACE_APPEND_DATA;
uint32_t execute_mask = ACE_EXECUTE;
if (isdir)
write_mask |= ACE_DELETE_CHILD;
masks->deny1 = 0;
if (!(mode & S_IRUSR) && (mode & (S_IRGRP|S_IROTH)))
masks->deny1 |= read_mask;
if (!(mode & S_IWUSR) && (mode & (S_IWGRP|S_IWOTH)))
masks->deny1 |= write_mask;
if (!(mode & S_IXUSR) && (mode & (S_IXGRP|S_IXOTH)))
masks->deny1 |= execute_mask;
masks->deny2 = 0;
if (!(mode & S_IRGRP) && (mode & S_IROTH))
masks->deny2 |= read_mask;
if (!(mode & S_IWGRP) && (mode & S_IWOTH))
masks->deny2 |= write_mask;
if (!(mode & S_IXGRP) && (mode & S_IXOTH))
masks->deny2 |= execute_mask;
masks->allow0 = 0;
if ((mode & S_IRUSR) && (!(mode & S_IRGRP) && (mode & S_IROTH)))
masks->allow0 |= read_mask;
if ((mode & S_IWUSR) && (!(mode & S_IWGRP) && (mode & S_IWOTH)))
masks->allow0 |= write_mask;
if ((mode & S_IXUSR) && (!(mode & S_IXGRP) && (mode & S_IXOTH)))
masks->allow0 |= execute_mask;
masks->owner = ACE_WRITE_ATTRIBUTES|ACE_WRITE_OWNER|ACE_WRITE_ACL|
ACE_WRITE_NAMED_ATTRS|ACE_READ_ACL|ACE_READ_ATTRIBUTES|
ACE_READ_NAMED_ATTRS|ACE_SYNCHRONIZE;
if (mode & S_IRUSR)
masks->owner |= read_mask;
if (mode & S_IWUSR)
masks->owner |= write_mask;
if (mode & S_IXUSR)
masks->owner |= execute_mask;
masks->group = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IRGRP)
masks->group |= read_mask;
if (mode & S_IWGRP)
masks->group |= write_mask;
if (mode & S_IXGRP)
masks->group |= execute_mask;
masks->everyone = ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_READ_NAMED_ATTRS|
ACE_SYNCHRONIZE;
if (mode & S_IROTH)
masks->everyone |= read_mask;
if (mode & S_IWOTH)
masks->everyone |= write_mask;
if (mode & S_IXOTH)
masks->everyone |= execute_mask;
}
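/*
 * Worked example (illustrative): for a regular file with mode 0644 the
 * masks computed above come out as deny1 = deny2 = allow0 = 0, owner =
 * the always-granted attribute/ACL bits plus read and write/append, and
 * group and everyone each carry only read, so the trivial ACL reduces to
 * the canonical owner@/group@/everyone@ allow entries.  A mode such as
 * 0604, where the group lacks a bit that others have, is what populates
 * allow0 (an owner@ read allow) and deny2 (a group@ read deny) ahead of
 * those three entries.
 */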
/*
* ace_trivial:
* determine whether an ace_t acl is trivial
*
* Trivialness implies that the acl is composed of only
* owner, group, and everyone entries. The ACL can't
* have read_acl denied, and write_owner/write_acl/write_attributes
* can only be granted to the owner@ entry.
*/
static int
ace_trivial_common(void *acep, int aclcnt,
uint64_t (*walk)(void *, uint64_t, int aclcnt,
uint16_t *, uint16_t *, uint32_t *))
{
uint16_t flags;
uint32_t mask;
uint16_t type;
uint64_t cookie = 0;
while ((cookie = walk(acep, cookie, aclcnt, &flags, &type, &mask))) {
switch (flags & ACE_TYPE_FLAGS) {
case ACE_OWNER:
case ACE_GROUP|ACE_IDENTIFIER_GROUP:
case ACE_EVERYONE:
break;
default:
return (1);
}
if (flags & (ACE_FILE_INHERIT_ACE|
ACE_DIRECTORY_INHERIT_ACE|ACE_NO_PROPAGATE_INHERIT_ACE|
ACE_INHERIT_ONLY_ACE))
return (1);
/*
* Special check for some special bits
*
* Don't allow anybody to deny reading basic
* attributes or a file's ACL.
*/
if ((mask & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
(type == ACE_ACCESS_DENIED_ACE_TYPE))
return (1);
/*
* Delete permission is never set by default
*/
if (mask & ACE_DELETE)
return (1);
/*
* Child delete permission should be accompanied by write
*/
if ((mask & ACE_DELETE_CHILD) && !(mask & ACE_WRITE_DATA))
return (1);
/*
* only allow owner@ to have
* write_acl/write_owner/write_attributes/write_xattr/
*/
if (type == ACE_ACCESS_ALLOWED_ACE_TYPE &&
(!(flags & ACE_OWNER) && (mask &
(ACE_WRITE_OWNER|ACE_WRITE_ACL| ACE_WRITE_ATTRIBUTES|
ACE_WRITE_NAMED_ATTRS))))
return (1);
}
return (0);
}
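/*
 * Example (illustrative): the plain owner@/group@/everyone@ allow entries
 * produced by zfs_acl_chmod() pass every check here, so the ACL is
 * reported trivial; adding a user or group ACE, any inheritance flag, a
 * deny of read_acl/read_attributes, or an ACE_DELETE grant makes this
 * return 1 and the ZFS_ACL_TRIVIAL hint is not set.
 */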
/*
* common code for setting ACLs.
*
* This function is called from zfs_mode_update, zfs_perm_init, and zfs_setacl.
* zfs_setacl passes a non-NULL inherit pointer (ihp) to indicate that it's
* already checked the acl and knows whether to inherit.
*/
int
zfs_aclset_common(znode_t *zp, zfs_acl_t *aclp, cred_t *cr, dmu_tx_t *tx)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_type_t otype;
zfs_acl_locator_cb_t locate = { 0 };
uint64_t mode;
sa_bulk_attr_t bulk[5];
uint64_t ctime[2];
int count = 0;
zfs_acl_phys_t acl_phys;
mode = zp->z_mode;
mode = zfs_mode_compute(mode, aclp, &zp->z_pflags,
KUID_TO_SUID(ZTOI(zp)->i_uid), KGID_TO_SGID(ZTOI(zp)->i_gid));
zp->z_mode = ZTOI(zp)->i_mode = mode;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, sizeof (ctime));
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
/*
* Upgrade needed?
*/
if (!zfsvfs->z_use_fuids) {
otype = DMU_OT_OLDACL;
} else {
if ((aclp->z_version == ZFS_ACL_VERSION_INITIAL) &&
(zfsvfs->z_version >= ZPL_VERSION_FUID))
zfs_acl_xform(zp, aclp, cr);
ASSERT(aclp->z_version >= ZFS_ACL_VERSION_FUID);
otype = DMU_OT_ACL;
}
/*
* Arrgh, we have to handle old on disk format
* as well as newer (preferred) SA format.
*/
if (zp->z_is_sa) { /* the easy case, just update the ACL attribute */
locate.cb_aclp = aclp;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate, aclp->z_acl_bytes);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_DACL_COUNT(zfsvfs),
NULL, &aclp->z_acl_count, sizeof (uint64_t));
} else { /* Painful legacy way */
zfs_acl_node_t *aclnode;
uint64_t off = 0;
uint64_t aoid;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
&acl_phys, sizeof (acl_phys))) != 0)
return (error);
aoid = acl_phys.z_acl_extern_obj;
if (aclp->z_acl_bytes > ZFS_ACE_SPACE) {
/*
* If ACL was previously external and we are now
* converting to new ACL format then release old
* ACL object and create a new one.
*/
if (aoid &&
aclp->z_version != acl_phys.z_acl_version) {
error = dmu_object_free(zfsvfs->z_os, aoid, tx);
if (error)
return (error);
aoid = 0;
}
if (aoid == 0) {
aoid = dmu_object_alloc(zfsvfs->z_os,
otype, aclp->z_acl_bytes,
otype == DMU_OT_ACL ?
DMU_OT_SYSACL : DMU_OT_NONE,
otype == DMU_OT_ACL ?
DN_OLD_MAX_BONUSLEN : 0, tx);
} else {
(void) dmu_object_set_blocksize(zfsvfs->z_os,
aoid, aclp->z_acl_bytes, 0, tx);
}
acl_phys.z_acl_extern_obj = aoid;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
dmu_write(zfsvfs->z_os, aoid, off,
aclnode->z_size, aclnode->z_acldata, tx);
off += aclnode->z_size;
}
} else {
void *start = acl_phys.z_ace_data;
/*
* Migrating back embedded?
*/
if (acl_phys.z_acl_extern_obj) {
error = dmu_object_free(zfsvfs->z_os,
acl_phys.z_acl_extern_obj, tx);
if (error)
return (error);
acl_phys.z_acl_extern_obj = 0;
}
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
if (aclnode->z_ace_count == 0)
continue;
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
}
/*
* If Old version then swap count/bytes to match old
* layout of znode_acl_phys_t.
*/
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
acl_phys.z_acl_size = aclp->z_acl_count;
acl_phys.z_acl_count = aclp->z_acl_bytes;
} else {
acl_phys.z_acl_size = aclp->z_acl_bytes;
acl_phys.z_acl_count = aclp->z_acl_count;
}
acl_phys.z_acl_version = aclp->z_version;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (acl_phys));
}
/*
* Replace ACL wide bits, but first clear them.
*/
zp->z_pflags &= ~ZFS_ACL_WIDE_FLAGS;
zp->z_pflags |= aclp->z_hints;
if (ace_trivial_common(aclp, 0, zfs_ace_walk) == 0)
zp->z_pflags |= ZFS_ACL_TRIVIAL;
zfs_tstamp_update_setup(zp, STATE_CHANGED, NULL, ctime);
return (sa_bulk_update(zp->z_sa_hdl, bulk, count, tx));
}
static void
zfs_acl_chmod(boolean_t isdir, uint64_t mode, boolean_t split, boolean_t trim,
zfs_acl_t *aclp)
{
void *acep = NULL;
uint64_t who;
int new_count, new_bytes;
int ace_size;
int entry_type;
uint16_t iflags, type;
uint32_t access_mask;
zfs_acl_node_t *newnode;
size_t abstract_size = aclp->z_ops->ace_abstract_size();
void *zacep;
trivial_acl_t masks;
new_count = new_bytes = 0;
acl_trivial_access_masks((mode_t)mode, isdir, &masks);
newnode = zfs_acl_node_alloc((abstract_size * 6) + aclp->z_acl_bytes);
zacep = newnode->z_acldata;
if (masks.allow0) {
zfs_set_ace(aclp, zacep, masks.allow0, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny1) {
zfs_set_ace(aclp, zacep, masks.deny1, DENY, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
if (masks.deny2) {
zfs_set_ace(aclp, zacep, masks.deny2, DENY, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
new_count++;
new_bytes += abstract_size;
}
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
entry_type = (iflags & ACE_TYPE_FLAGS);
/*
* ACEs used to represent the file mode may be divided
* into an equivalent pair of inherit-only and regular
* ACEs, if they are inheritable.
* Skip regular ACEs, which are replaced by the new mode.
*/
if (split && (entry_type == ACE_OWNER ||
entry_type == OWNING_GROUP ||
entry_type == ACE_EVERYONE)) {
if (!isdir || !(iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
continue;
/*
* We preserve owner@, group@, or everyone@
* permissions, if they are inheritable, by
* copying them to inherit_only ACEs. This
* prevents inheritable permissions from being
* altered along with the file mode.
*/
iflags |= ACE_INHERIT_ONLY_ACE;
}
/*
* If this ACL has any inheritable ACEs, mark that in
* the hints (which are later masked into the pflags)
* so create knows to do inheritance.
*/
if (isdir && (iflags &
(ACE_FILE_INHERIT_ACE|ACE_DIRECTORY_INHERIT_ACE)))
aclp->z_hints |= ZFS_INHERIT_ACE;
if ((type != ALLOW && type != DENY) ||
(iflags & ACE_INHERIT_ONLY_ACE)) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
aclp->z_hints |= ZFS_ACL_OBJ_ACE;
break;
}
} else {
/*
* Limit permissions to be no greater than
* group permissions.
* The "aclinherit" and "aclmode" properties
* affect policy for create and chmod(2),
* respectively.
*/
if ((type == ALLOW) && trim)
access_mask &= masks.group;
}
zfs_set_ace(aclp, zacep, access_mask, type, who, iflags);
ace_size = aclp->z_ops->ace_size(acep);
zacep = (void *)((uintptr_t)zacep + ace_size);
new_count++;
new_bytes += ace_size;
}
zfs_set_ace(aclp, zacep, masks.owner, ALLOW, -1, ACE_OWNER);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.group, ALLOW, -1, OWNING_GROUP);
zacep = (void *)((uintptr_t)zacep + abstract_size);
zfs_set_ace(aclp, zacep, masks.everyone, ALLOW, -1, ACE_EVERYONE);
new_count += 3;
new_bytes += abstract_size * 3;
zfs_acl_release_nodes(aclp);
aclp->z_acl_count = new_count;
aclp->z_acl_bytes = new_bytes;
newnode->z_ace_count = new_count;
newnode->z_size = new_bytes;
list_insert_tail(&aclp->z_acl, newnode);
}
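/*
 * Worked example (illustrative): chmod(0750) on a directory whose ACL
 * carries an inheritable group@:rwx:fd:allow entry keeps that entry but
 * marks it inherit_only, drops the old non-inheritable
 * owner@/group@/everyone@ entries, prepends whatever allow0/deny1/deny2
 * entries the new mode needs (none for 0750), and appends fresh
 * owner@/group@/everyone@ allows matching 0750, so inheritance for future
 * children survives the mode change.
 */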
int
zfs_acl_chmod_setattr(znode_t *zp, zfs_acl_t **aclp, uint64_t mode)
{
int error = 0;
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
if (ZTOZSB(zp)->z_acl_mode == ZFS_ACL_DISCARD)
*aclp = zfs_acl_alloc(zfs_acl_version_zp(zp));
else
error = zfs_acl_node_read(zp, B_TRUE, aclp, B_TRUE);
if (error == 0) {
(*aclp)->z_hints = zp->z_pflags & V4_ACL_WIDE_FLAGS;
zfs_acl_chmod(S_ISDIR(ZTOI(zp)->i_mode), mode, B_TRUE,
(ZTOZSB(zp)->z_acl_mode == ZFS_ACL_GROUPMASK), *aclp);
}
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Should ACE be inherited?
*/
static int
zfs_ace_can_use(umode_t obj_mode, uint16_t acep_flags)
{
int iflags = (acep_flags & 0xf);
if (S_ISDIR(obj_mode) && (iflags & ACE_DIRECTORY_INHERIT_ACE))
return (1);
else if (iflags & ACE_FILE_INHERIT_ACE)
return (!(S_ISDIR(obj_mode) &&
(iflags & ACE_NO_PROPAGATE_INHERIT_ACE)));
return (0);
}
/*
* inherit inheritable ACEs from parent
*/
static zfs_acl_t *
zfs_acl_inherit(zfsvfs_t *zfsvfs, umode_t va_mode, zfs_acl_t *paclp,
uint64_t mode, boolean_t *need_chmod)
{
void *pacep = NULL;
void *acep;
zfs_acl_node_t *aclnode;
zfs_acl_t *aclp = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t iflags, newflags, type;
size_t ace_size;
void *data1, *data2;
size_t data1sz, data2sz;
uint_t aclinherit;
boolean_t isdir = S_ISDIR(va_mode);
boolean_t isreg = S_ISREG(va_mode);
*need_chmod = B_TRUE;
aclp = zfs_acl_alloc(paclp->z_version);
aclinherit = zfsvfs->z_acl_inherit;
if (aclinherit == ZFS_ACL_DISCARD || S_ISLNK(va_mode))
return (aclp);
while ((pacep = zfs_acl_next_ace(paclp, pacep, &who,
&access_mask, &iflags, &type))) {
/*
* don't inherit bogus ACEs
*/
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
/*
* Check if ACE is inheritable by this vnode
*/
if ((aclinherit == ZFS_ACL_NOALLOW && type == ALLOW) ||
!zfs_ace_can_use(va_mode, iflags))
continue;
/*
* If owner@, group@, or everyone@ inheritable
* then zfs_acl_chmod() isn't needed.
*/
if ((aclinherit == ZFS_ACL_PASSTHROUGH ||
aclinherit == ZFS_ACL_PASSTHROUGH_X) &&
((iflags & (ACE_OWNER|ACE_EVERYONE)) ||
((iflags & OWNING_GROUP) == OWNING_GROUP)) &&
(isreg || (isdir && (iflags & ACE_DIRECTORY_INHERIT_ACE))))
*need_chmod = B_FALSE;
/*
* Strip inherited execute permission from file if
* not in mode
*/
if (aclinherit == ZFS_ACL_PASSTHROUGH_X && type == ALLOW &&
!isdir && ((mode & (S_IXUSR|S_IXGRP|S_IXOTH)) == 0)) {
access_mask &= ~ACE_EXECUTE;
}
/*
* Strip write_acl and write_owner from permissions
* when inheriting an ACE
*/
if (aclinherit == ZFS_ACL_RESTRICTED && type == ALLOW) {
access_mask &= ~RESTRICTED_CLEAR;
}
ace_size = aclp->z_ops->ace_size(pacep);
aclnode = zfs_acl_node_alloc(ace_size);
list_insert_tail(&aclp->z_acl, aclnode);
acep = aclnode->z_acldata;
zfs_set_ace(aclp, acep, access_mask, type,
who, iflags|ACE_INHERITED_ACE);
/*
* Copy special opaque data if any
*/
if ((data1sz = paclp->z_ops->ace_data(pacep, &data1)) != 0) {
VERIFY((data2sz = aclp->z_ops->ace_data(acep,
&data2)) == data1sz);
bcopy(data1, data2, data2sz);
}
aclp->z_acl_count++;
aclnode->z_ace_count++;
aclp->z_acl_bytes += aclnode->z_size;
newflags = aclp->z_ops->ace_flags_get(acep);
/*
* If ACE is not to be inherited further, or if the vnode is
* not a directory, remove all inheritance flags
*/
if (!isdir || (iflags & ACE_NO_PROPAGATE_INHERIT_ACE)) {
newflags &= ~ALL_INHERIT;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
continue;
}
/*
* This directory has an inheritable ACE
*/
aclp->z_hints |= ZFS_INHERIT_ACE;
/*
* If only FILE_INHERIT is set then turn on
* inherit_only
*/
if ((iflags & (ACE_FILE_INHERIT_ACE |
ACE_DIRECTORY_INHERIT_ACE)) == ACE_FILE_INHERIT_ACE) {
newflags |= ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
} else {
newflags &= ~ACE_INHERIT_ONLY_ACE;
aclp->z_ops->ace_flags_set(acep,
newflags|ACE_INHERITED_ACE);
}
}
if (zfsvfs->z_acl_mode == ZFS_ACL_RESTRICTED &&
aclp->z_acl_count != 0) {
*need_chmod = B_FALSE;
}
return (aclp);
}
/*
* Create file system object initial permissions
* including inheritable ACEs.
* Also, create FUIDs for owner and group.
*/
int
zfs_acl_ids_create(znode_t *dzp, int flag, vattr_t *vap, cred_t *cr,
vsecattr_t *vsecp, zfs_acl_ids_t *acl_ids)
{
int error;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
zfs_acl_t *paclp;
gid_t gid = vap->va_gid;
boolean_t need_chmod = B_TRUE;
boolean_t trim = B_FALSE;
boolean_t inherited = B_FALSE;
bzero(acl_ids, sizeof (zfs_acl_ids_t));
acl_ids->z_mode = vap->va_mode;
if (vsecp)
if ((error = zfs_vsec_2_aclp(zfsvfs, vap->va_mode, vsecp,
cr, &acl_ids->z_fuidp, &acl_ids->z_aclp)) != 0)
return (error);
acl_ids->z_fuid = vap->va_uid;
acl_ids->z_fgid = vap->va_gid;
#ifdef HAVE_KSID
/*
* Determine uid and gid.
*/
if ((flag & IS_ROOT_NODE) || zfsvfs->z_replay ||
((flag & IS_XATTR) && (S_ISDIR(vap->va_mode)))) {
acl_ids->z_fuid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_uid,
cr, ZFS_OWNER, &acl_ids->z_fuidp);
acl_ids->z_fgid = zfs_fuid_create(zfsvfs, (uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
} else {
acl_ids->z_fuid = zfs_fuid_create_cred(zfsvfs, ZFS_OWNER,
cr, &acl_ids->z_fuidp);
acl_ids->z_fgid = 0;
if (vap->va_mask & AT_GID) {
acl_ids->z_fgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid,
cr, ZFS_GROUP, &acl_ids->z_fuidp);
gid = vap->va_gid;
if (acl_ids->z_fgid != KGID_TO_SGID(ZTOI(dzp)->i_gid) &&
!groupmember(vap->va_gid, cr) &&
secpolicy_vnode_create_gid(cr) != 0)
acl_ids->z_fgid = 0;
}
if (acl_ids->z_fgid == 0) {
if (dzp->z_mode & S_ISGID) {
char *domain;
uint32_t rid;
acl_ids->z_fgid = KGID_TO_SGID(
ZTOI(dzp)->i_gid);
gid = zfs_fuid_map_id(zfsvfs, acl_ids->z_fgid,
cr, ZFS_GROUP);
if (zfsvfs->z_use_fuids &&
IS_EPHEMERAL(acl_ids->z_fgid)) {
domain = zfs_fuid_idx_domain(
&zfsvfs->z_fuid_idx,
FUID_INDEX(acl_ids->z_fgid));
rid = FUID_RID(acl_ids->z_fgid);
zfs_fuid_node_add(&acl_ids->z_fuidp,
domain, rid,
FUID_INDEX(acl_ids->z_fgid),
acl_ids->z_fgid, ZFS_GROUP);
}
} else {
acl_ids->z_fgid = zfs_fuid_create_cred(zfsvfs,
ZFS_GROUP, cr, &acl_ids->z_fuidp);
gid = crgetgid(cr);
}
}
}
#endif /* HAVE_KSID */
/*
* If we're creating a directory, and the parent directory has the
* set-GID bit set, set it on the new directory.
* Otherwise, if the user is neither privileged nor a member of the
* file's new group, clear the file's set-GID bit.
*/
if (!(flag & IS_ROOT_NODE) && (dzp->z_mode & S_ISGID) &&
(S_ISDIR(vap->va_mode))) {
acl_ids->z_mode |= S_ISGID;
} else {
if ((acl_ids->z_mode & S_ISGID) &&
secpolicy_vnode_setids_setgids(cr, gid) != 0)
acl_ids->z_mode &= ~S_ISGID;
}
if (acl_ids->z_aclp == NULL) {
mutex_enter(&dzp->z_acl_lock);
mutex_enter(&dzp->z_lock);
if (!(flag & IS_ROOT_NODE) &&
(dzp->z_pflags & ZFS_INHERIT_ACE) &&
!(dzp->z_pflags & ZFS_XATTR)) {
VERIFY(0 == zfs_acl_node_read(dzp, B_TRUE,
&paclp, B_FALSE));
acl_ids->z_aclp = zfs_acl_inherit(zfsvfs,
vap->va_mode, paclp, acl_ids->z_mode, &need_chmod);
inherited = B_TRUE;
} else {
acl_ids->z_aclp =
zfs_acl_alloc(zfs_acl_version_zp(dzp));
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
mutex_exit(&dzp->z_lock);
mutex_exit(&dzp->z_acl_lock);
if (need_chmod) {
if (S_ISDIR(vap->va_mode))
acl_ids->z_aclp->z_hints |=
ZFS_ACL_AUTO_INHERIT;
if (zfsvfs->z_acl_mode == ZFS_ACL_GROUPMASK &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH &&
zfsvfs->z_acl_inherit != ZFS_ACL_PASSTHROUGH_X)
trim = B_TRUE;
zfs_acl_chmod(vap->va_mode, acl_ids->z_mode, B_FALSE,
trim, acl_ids->z_aclp);
}
}
if (inherited || vsecp) {
acl_ids->z_mode = zfs_mode_compute(acl_ids->z_mode,
acl_ids->z_aclp, &acl_ids->z_aclp->z_hints,
acl_ids->z_fuid, acl_ids->z_fgid);
if (ace_trivial_common(acl_ids->z_aclp, 0, zfs_ace_walk) == 0)
acl_ids->z_aclp->z_hints |= ZFS_ACL_TRIVIAL;
}
return (0);
}
/*
* Free ACL and fuid_infop, but not the acl_ids structure
*/
void
zfs_acl_ids_free(zfs_acl_ids_t *acl_ids)
{
if (acl_ids->z_aclp)
zfs_acl_free(acl_ids->z_aclp);
if (acl_ids->z_fuidp)
zfs_fuid_info_free(acl_ids->z_fuidp);
acl_ids->z_aclp = NULL;
acl_ids->z_fuidp = NULL;
}
boolean_t
zfs_acl_ids_overquota(zfsvfs_t *zv, zfs_acl_ids_t *acl_ids, uint64_t projid)
{
return (zfs_id_overquota(zv, DMU_USERUSED_OBJECT, acl_ids->z_fuid) ||
zfs_id_overquota(zv, DMU_GROUPUSED_OBJECT, acl_ids->z_fgid) ||
(projid != ZFS_DEFAULT_PROJID && projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zv, DMU_PROJECTUSED_OBJECT, projid)));
}
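/*
 * Sketch of the expected caller sequence for the zfs_acl_ids_* helpers
 * above (a sketch only; the create paths such as zfs_create() are the
 * real users and also handle the DMU transaction):
 *
 *	if ((error = zfs_acl_ids_create(dzp, 0, vap, cr, NULL, &acl_ids)) != 0)
 *		return (error);
 *	if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
 *		zfs_acl_ids_free(&acl_ids);
 *		return (SET_ERROR(EDQUOT));
 *	}
 *	... zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids); ...
 *	zfs_acl_ids_free(&acl_ids);
 */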
/*
* Retrieve a file's ACL
*/
int
zfs_getacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfs_acl_t *aclp;
ulong_t mask;
int error;
int count = 0;
int largeace = 0;
mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT |
VSA_ACE_ACLFLAGS | VSA_ACE_ALLTYPES);
if (mask == 0)
return (SET_ERROR(ENOSYS));
if ((error = zfs_zaccess(zp, ACE_READ_ACL, 0, skipaclchk, cr)))
return (error);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Scan ACL to determine number of ACEs
*/
if ((zp->z_pflags & ZFS_ACL_OBJ_ACE) && !(mask & VSA_ACE_ALLTYPES)) {
void *zacep = NULL;
uint64_t who;
uint32_t access_mask;
uint16_t type, iflags;
while ((zacep = zfs_acl_next_ace(aclp, zacep,
&who, &access_mask, &iflags, &type))) {
switch (type) {
case ACE_ACCESS_ALLOWED_OBJECT_ACE_TYPE:
case ACE_ACCESS_DENIED_OBJECT_ACE_TYPE:
case ACE_SYSTEM_AUDIT_OBJECT_ACE_TYPE:
case ACE_SYSTEM_ALARM_OBJECT_ACE_TYPE:
largeace++;
continue;
default:
count++;
}
}
vsecp->vsa_aclcnt = count;
} else
count = (int)aclp->z_acl_count;
if (mask & VSA_ACECNT) {
vsecp->vsa_aclcnt = count;
}
if (mask & VSA_ACE) {
size_t aclsz;
aclsz = count * sizeof (ace_t) +
sizeof (ace_object_t) * largeace;
vsecp->vsa_aclentp = kmem_alloc(aclsz, KM_SLEEP);
vsecp->vsa_aclentsz = aclsz;
if (aclp->z_version == ZFS_ACL_VERSION_FUID)
zfs_copy_fuid_2_ace(ZTOZSB(zp), aclp, cr,
vsecp->vsa_aclentp, !(mask & VSA_ACE_ALLTYPES));
else {
zfs_acl_node_t *aclnode;
void *start = vsecp->vsa_aclentp;
for (aclnode = list_head(&aclp->z_acl); aclnode;
aclnode = list_next(&aclp->z_acl, aclnode)) {
bcopy(aclnode->z_acldata, start,
aclnode->z_size);
start = (caddr_t)start + aclnode->z_size;
}
ASSERT((caddr_t)start - (caddr_t)vsecp->vsa_aclentp ==
aclp->z_acl_bytes);
}
}
if (mask & VSA_ACE_ACLFLAGS) {
vsecp->vsa_aclflags = 0;
if (zp->z_pflags & ZFS_ACL_DEFAULTED)
vsecp->vsa_aclflags |= ACL_DEFAULTED;
if (zp->z_pflags & ZFS_ACL_PROTECTED)
vsecp->vsa_aclflags |= ACL_PROTECTED;
if (zp->z_pflags & ZFS_ACL_AUTO_INHERIT)
vsecp->vsa_aclflags |= ACL_AUTO_INHERIT;
}
mutex_exit(&zp->z_acl_lock);
return (0);
}
int
zfs_vsec_2_aclp(zfsvfs_t *zfsvfs, umode_t obj_mode,
vsecattr_t *vsecp, cred_t *cr, zfs_fuid_info_t **fuidp, zfs_acl_t **zaclp)
{
zfs_acl_t *aclp;
zfs_acl_node_t *aclnode;
int aclcnt = vsecp->vsa_aclcnt;
int error;
if (vsecp->vsa_aclcnt > MAX_ACL_ENTRIES || vsecp->vsa_aclcnt <= 0)
return (SET_ERROR(EINVAL));
aclp = zfs_acl_alloc(zfs_acl_version(zfsvfs->z_version));
aclp->z_hints = 0;
aclnode = zfs_acl_node_alloc(aclcnt * sizeof (zfs_object_ace_t));
if (aclp->z_version == ZFS_ACL_VERSION_INITIAL) {
if ((error = zfs_copy_ace_2_oldace(obj_mode, aclp,
(ace_t *)vsecp->vsa_aclentp, aclnode->z_acldata,
aclcnt, &aclnode->z_size)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
} else {
if ((error = zfs_copy_ace_2_fuid(zfsvfs, obj_mode, aclp,
vsecp->vsa_aclentp, aclnode->z_acldata, aclcnt,
&aclnode->z_size, fuidp, cr)) != 0) {
zfs_acl_free(aclp);
zfs_acl_node_free(aclnode);
return (error);
}
}
aclp->z_acl_bytes = aclnode->z_size;
aclnode->z_ace_count = aclcnt;
aclp->z_acl_count = aclcnt;
list_insert_head(&aclp->z_acl, aclnode);
/*
* If flags are being set then add them to z_hints
*/
if (vsecp->vsa_mask & VSA_ACE_ACLFLAGS) {
if (vsecp->vsa_aclflags & ACL_PROTECTED)
aclp->z_hints |= ZFS_ACL_PROTECTED;
if (vsecp->vsa_aclflags & ACL_DEFAULTED)
aclp->z_hints |= ZFS_ACL_DEFAULTED;
if (vsecp->vsa_aclflags & ACL_AUTO_INHERIT)
aclp->z_hints |= ZFS_ACL_AUTO_INHERIT;
}
*zaclp = aclp;
return (0);
}
/*
* Set a file's ACL
*/
int
zfs_setacl(znode_t *zp, vsecattr_t *vsecp, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
ulong_t mask = vsecp->vsa_mask & (VSA_ACE | VSA_ACECNT);
dmu_tx_t *tx;
int error;
zfs_acl_t *aclp;
zfs_fuid_info_t *fuidp = NULL;
boolean_t fuid_dirtied;
uint64_t acl_obj;
if (mask == 0)
return (SET_ERROR(ENOSYS));
if (zp->z_pflags & ZFS_IMMUTABLE)
return (SET_ERROR(EPERM));
if ((error = zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr)))
return (error);
error = zfs_vsec_2_aclp(zfsvfs, ZTOI(zp)->i_mode, vsecp, cr, &fuidp,
&aclp);
if (error)
return (error);
/*
* If ACL wide flags aren't being set then preserve any
* existing flags.
*/
if (!(vsecp->vsa_mask & VSA_ACE_ACLFLAGS)) {
aclp->z_hints |=
(zp->z_pflags & V4_ACL_WIDE_FLAGS);
}
top:
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
/*
* If old version and ACL won't fit in bonus and we aren't
* upgrading then take out necessary DMU holds
*/
if ((acl_obj = zfs_external_acl(zp)) != 0) {
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) <= ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0, aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, aclp->z_acl_bytes);
}
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_NOWAIT);
if (error) {
mutex_exit(&zp->z_acl_lock);
mutex_exit(&zp->z_lock);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
zfs_acl_free(aclp);
return (error);
}
error = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT(error == 0);
ASSERT(zp->z_acl_cached == NULL);
zp->z_acl_cached = aclp;
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
zfs_log_acl(zilog, tx, zp, vsecp, fuidp);
if (fuidp)
zfs_fuid_info_free(fuidp);
dmu_tx_commit(tx);
mutex_exit(&zp->z_lock);
mutex_exit(&zp->z_acl_lock);
return (error);
}
/*
* Check accesses of interest (AoI) against attributes of the dataset
* such as read-only. Returns zero if no AoI conflicts with dataset
* attributes; otherwise an appropriate errno is returned.
*/
static int
zfs_zaccess_dataset_check(znode_t *zp, uint32_t v4_mode)
{
if ((v4_mode & WRITE_MASK) && (zfs_is_readonly(ZTOZSB(zp))) &&
(!Z_ISDEV(ZTOI(zp)->i_mode) ||
(Z_ISDEV(ZTOI(zp)->i_mode) && (v4_mode & WRITE_MASK_ATTRS)))) {
return (SET_ERROR(EROFS));
}
/*
* Intentionally allow ZFS_READONLY through here.
* See zfs_zaccess_common().
*/
if ((v4_mode & WRITE_MASK_DATA) &&
(zp->z_pflags & ZFS_IMMUTABLE)) {
return (SET_ERROR(EPERM));
}
if ((v4_mode & (ACE_DELETE | ACE_DELETE_CHILD)) &&
(zp->z_pflags & ZFS_NOUNLINK)) {
return (SET_ERROR(EPERM));
}
if (((v4_mode & (ACE_READ_DATA|ACE_EXECUTE)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED))) {
return (SET_ERROR(EACCES));
}
return (0);
}
/*
* The primary usage of this function is to loop through all of the
* ACEs in the znode, determining what accesses of interest (AoI) to
* the caller are allowed or denied. The AoI are expressed as bits in
* the working_mode parameter. As each ACE is processed, bits covered
* by that ACE are removed from the working_mode. This removal
* facilitates two things. The first is that when the working mode is
* empty (= 0), we know we've looked at all the AoI. The second is
* that the ACE interpretation rules don't allow a later ACE to undo
* something granted or denied by an earlier ACE. Removing the
* discovered access or denial enforces this rule. At the end of
* processing the ACEs, all AoI that were found to be denied are
* placed into the working_mode, giving the caller a mask of denied
* accesses. Returns:
* 0 if all AoI granted
* EACCES if the denied mask is non-zero
* other error if abnormal failure (e.g., IO error)
*
* A secondary usage of the function is to determine if any of the
* AoI are granted. If an ACE grants any access in
* the working_mode, we immediately short circuit out of the function.
* This mode is chosen by setting anyaccess to B_TRUE. The
* working_mode is not a denied access mask upon exit if the function
* is used in this manner.
*/
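/*
 * Illustrative walk-through of the primary usage, with a hypothetical
 * two-entry ACL:
 *
 *	working_mode = ACE_READ_DATA | ACE_WRITE_DATA on entry.
 *	ACE 1: owner@:read_data:allow    -> ACE_READ_DATA removed from
 *	                                    working_mode.
 *	ACE 2: everyone@:write_data:deny -> ACE_WRITE_DATA removed and
 *	                                    remembered in deny_mask.
 *
 * working_mode is now 0, so the loop stops; the deny bits are put back,
 * working_mode == ACE_WRITE_DATA on return and the result is EACCES.
 * Had both entries been allows, the function would return 0.
 */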
static int
zfs_zaccess_aces_check(znode_t *zp, uint32_t *working_mode,
boolean_t anyaccess, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_acl_t *aclp;
int error;
uid_t uid = crgetuid(cr);
uint64_t who;
uint16_t type, iflags;
uint16_t entry_type;
uint32_t access_mask;
uint32_t deny_mask = 0;
zfs_ace_hdr_t *acep = NULL;
boolean_t checkit;
uid_t gowner;
uid_t fowner;
zfs_fuid_map_ids(zp, cr, &fowner, &gowner);
mutex_enter(&zp->z_acl_lock);
error = zfs_acl_node_read(zp, B_FALSE, &aclp, B_FALSE);
if (error != 0) {
mutex_exit(&zp->z_acl_lock);
return (error);
}
ASSERT(zp->z_acl_cached);
while ((acep = zfs_acl_next_ace(aclp, acep, &who, &access_mask,
&iflags, &type))) {
uint32_t mask_matched;
if (!zfs_acl_valid_ace_type(type, iflags))
continue;
if (S_ISDIR(ZTOI(zp)->i_mode) &&
(iflags & ACE_INHERIT_ONLY_ACE))
continue;
/* Skip ACE if it does not affect any AoI */
mask_matched = (access_mask & *working_mode);
if (!mask_matched)
continue;
entry_type = (iflags & ACE_TYPE_FLAGS);
checkit = B_FALSE;
switch (entry_type) {
case ACE_OWNER:
if (uid == fowner)
checkit = B_TRUE;
break;
case OWNING_GROUP:
who = gowner;
- /* FALLTHROUGH */
+ fallthrough;
case ACE_IDENTIFIER_GROUP:
checkit = zfs_groupmember(zfsvfs, who, cr);
break;
case ACE_EVERYONE:
checkit = B_TRUE;
break;
/* USER Entry */
default:
if (entry_type == 0) {
uid_t newid;
newid = zfs_fuid_map_id(zfsvfs, who, cr,
ZFS_ACE_USER);
if (newid != IDMAP_WK_CREATOR_OWNER_UID &&
uid == newid)
checkit = B_TRUE;
break;
} else {
mutex_exit(&zp->z_acl_lock);
return (SET_ERROR(EIO));
}
}
if (checkit) {
if (type == DENY) {
DTRACE_PROBE3(zfs__ace__denies,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
deny_mask |= mask_matched;
} else {
DTRACE_PROBE3(zfs__ace__allows,
znode_t *, zp,
zfs_ace_hdr_t *, acep,
uint32_t, mask_matched);
if (anyaccess) {
mutex_exit(&zp->z_acl_lock);
return (0);
}
}
*working_mode &= ~mask_matched;
}
/* Are we done? */
if (*working_mode == 0)
break;
}
mutex_exit(&zp->z_acl_lock);
/* Put the found 'denies' back on the working mode */
if (deny_mask) {
*working_mode |= deny_mask;
return (SET_ERROR(EACCES));
} else if (*working_mode) {
return (-1);
}
return (0);
}
/*
* Return true if any access whatsoever is granted; we don't actually
* care what access is granted.
*/
boolean_t
zfs_has_access(znode_t *zp, cred_t *cr)
{
uint32_t have = ACE_ALL_PERMS;
if (zfs_zaccess_aces_check(zp, &have, B_TRUE, cr) != 0) {
uid_t owner;
owner = zfs_fuid_map_id(ZTOZSB(zp),
KUID_TO_SUID(ZTOI(zp)->i_uid), cr, ZFS_OWNER);
return (secpolicy_vnode_any_access(cr, ZTOI(zp), owner) == 0);
}
return (B_TRUE);
}
static int
zfs_zaccess_common(znode_t *zp, uint32_t v4_mode, uint32_t *working_mode,
boolean_t *check_privs, boolean_t skipaclchk, cred_t *cr)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int err;
*working_mode = v4_mode;
*check_privs = B_TRUE;
/*
* Short circuit empty requests
*/
if (v4_mode == 0 || zfsvfs->z_replay) {
*working_mode = 0;
return (0);
}
if ((err = zfs_zaccess_dataset_check(zp, v4_mode)) != 0) {
*check_privs = B_FALSE;
return (err);
}
/*
* The caller requested that the ACL check be skipped. This
* would only happen if the caller checked VOP_ACCESS() with a
* 32 bit ACE mask and already had the appropriate permissions.
*/
if (skipaclchk) {
*working_mode = 0;
return (0);
}
/*
* Note: ZFS_READONLY represents the "DOS R/O" attribute.
* When that flag is set, we should behave as if write access
* were not granted by anything in the ACL. In particular:
* We _must_ allow writes after opening the file r/w, then
* setting the DOS R/O attribute, and writing some more.
* (Similar to how you can write after fchmod(fd, 0444).)
*
* Therefore ZFS_READONLY is ignored in the dataset check
* above, and checked here as if part of the ACL check.
* Also note: DOS R/O is ignored for directories.
*/
if ((v4_mode & WRITE_MASK_DATA) &&
S_ISDIR(ZTOI(zp)->i_mode) &&
(zp->z_pflags & ZFS_READONLY)) {
return (SET_ERROR(EPERM));
}
return (zfs_zaccess_aces_check(zp, working_mode, B_FALSE, cr));
}
static int
zfs_zaccess_append(znode_t *zp, uint32_t *working_mode, boolean_t *check_privs,
cred_t *cr)
{
if (*working_mode != ACE_WRITE_DATA)
return (SET_ERROR(EACCES));
return (zfs_zaccess_common(zp, ACE_APPEND_DATA, working_mode,
check_privs, B_FALSE, cr));
}
int
zfs_fastaccesschk_execute(znode_t *zdp, cred_t *cr)
{
boolean_t owner = B_FALSE;
boolean_t groupmbr = B_FALSE;
boolean_t is_attr;
uid_t uid = crgetuid(cr);
int error;
if (zdp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
is_attr = ((zdp->z_pflags & ZFS_XATTR) &&
(S_ISDIR(ZTOI(zdp)->i_mode)));
if (is_attr)
goto slow;
mutex_enter(&zdp->z_acl_lock);
if (zdp->z_pflags & ZFS_NO_EXECS_DENIED) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
if (KUID_TO_SUID(ZTOI(zdp)->i_uid) != 0 ||
KGID_TO_SGID(ZTOI(zdp)->i_gid) != 0) {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
if (uid == KUID_TO_SUID(ZTOI(zdp)->i_uid)) {
owner = B_TRUE;
if (zdp->z_mode & S_IXUSR) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (groupmember(KGID_TO_SGID(ZTOI(zdp)->i_gid), cr)) {
groupmbr = B_TRUE;
if (zdp->z_mode & S_IXGRP) {
mutex_exit(&zdp->z_acl_lock);
return (0);
} else {
mutex_exit(&zdp->z_acl_lock);
goto slow;
}
}
if (!owner && !groupmbr) {
if (zdp->z_mode & S_IXOTH) {
mutex_exit(&zdp->z_acl_lock);
return (0);
}
}
mutex_exit(&zdp->z_acl_lock);
slow:
DTRACE_PROBE(zfs__fastpath__execute__access__miss);
ZFS_ENTER(ZTOZSB(zdp));
error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr);
ZFS_EXIT(ZTOZSB(zdp));
return (error);
}
/*
* Determine whether Access should be granted/denied.
*
* The least priv subsystem is always consulted as a basic privilege
* can define any form of access.
*/
int
zfs_zaccess(znode_t *zp, int mode, int flags, boolean_t skipaclchk, cred_t *cr)
{
uint32_t working_mode;
int error;
int is_attr;
boolean_t check_privs;
znode_t *xzp;
znode_t *check_zp = zp;
mode_t needed_bits;
uid_t owner;
is_attr = ((zp->z_pflags & ZFS_XATTR) && S_ISDIR(ZTOI(zp)->i_mode));
/*
* If attribute then validate against base file
*/
if (is_attr) {
if ((error = zfs_zget(ZTOZSB(zp),
zp->z_xattr_parent, &xzp)) != 0) {
return (error);
}
check_zp = xzp;
/*
* fixup mode to map to xattr perms
*/
if (mode & (ACE_WRITE_DATA|ACE_APPEND_DATA)) {
mode &= ~(ACE_WRITE_DATA|ACE_APPEND_DATA);
mode |= ACE_WRITE_NAMED_ATTRS;
}
if (mode & (ACE_READ_DATA|ACE_EXECUTE)) {
mode &= ~(ACE_READ_DATA|ACE_EXECUTE);
mode |= ACE_READ_NAMED_ATTRS;
}
}
owner = zfs_fuid_map_id(ZTOZSB(zp), KUID_TO_SUID(ZTOI(zp)->i_uid),
cr, ZFS_OWNER);
/*
* Map the bits required to the standard inode flags
* S_IRUSR|S_IWUSR|S_IXUSR in the needed_bits. Map the bits
* mapped by working_mode (currently missing) in missing_bits.
* Call secpolicy_vnode_access2() with (needed_bits & ~checkmode),
* needed_bits.
*/
needed_bits = 0;
working_mode = mode;
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES)) &&
owner == crgetuid(cr))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
needed_bits |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
needed_bits |= S_IXUSR;
if ((error = zfs_zaccess_common(check_zp, mode, &working_mode,
&check_privs, skipaclchk, cr)) == 0) {
if (is_attr)
zrele(xzp);
return (secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits));
}
if (error && !check_privs) {
if (is_attr)
zrele(xzp);
return (error);
}
if (error && (flags & V_APPEND)) {
error = zfs_zaccess_append(zp, &working_mode, &check_privs, cr);
}
if (error && check_privs) {
mode_t checkmode = 0;
/*
* First check for implicit owner permission on
* read_acl/read_attributes
*/
error = 0;
ASSERT(working_mode != 0);
if ((working_mode & (ACE_READ_ACL|ACE_READ_ATTRIBUTES) &&
owner == crgetuid(cr)))
working_mode &= ~(ACE_READ_ACL|ACE_READ_ATTRIBUTES);
if (working_mode & (ACE_READ_DATA|ACE_READ_NAMED_ATTRS|
ACE_READ_ACL|ACE_READ_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IRUSR;
if (working_mode & (ACE_WRITE_DATA|ACE_WRITE_NAMED_ATTRS|
ACE_APPEND_DATA|ACE_WRITE_ATTRIBUTES|ACE_SYNCHRONIZE))
checkmode |= S_IWUSR;
if (working_mode & ACE_EXECUTE)
checkmode |= S_IXUSR;
error = secpolicy_vnode_access2(cr, ZTOI(check_zp), owner,
needed_bits & ~checkmode, needed_bits);
if (error == 0 && (working_mode & ACE_WRITE_OWNER))
error = secpolicy_vnode_chown(cr, owner);
if (error == 0 && (working_mode & ACE_WRITE_ACL))
error = secpolicy_vnode_setdac(cr, owner);
if (error == 0 && (working_mode &
(ACE_DELETE|ACE_DELETE_CHILD)))
error = secpolicy_vnode_remove(cr);
if (error == 0 && (working_mode & ACE_SYNCHRONIZE)) {
error = secpolicy_vnode_chown(cr, owner);
}
if (error == 0) {
/*
* See if any bits other than those already checked
* for are still present. If so then return EACCES
*/
if (working_mode & ~(ZFS_CHECKED_MASKS)) {
error = SET_ERROR(EACCES);
}
}
} else if (error == 0) {
error = secpolicy_vnode_access2(cr, ZTOI(zp), owner,
needed_bits, needed_bits);
}
if (is_attr)
zrele(xzp);
return (error);
}
/*
* Translate traditional unix S_IRUSR/S_IWUSR/S_IXUSR mode into
* NFSv4-style ZFS ACL format and call zfs_zaccess()
*/
int
zfs_zaccess_rwx(znode_t *zp, mode_t mode, int flags, cred_t *cr)
{
return (zfs_zaccess(zp, zfs_unix_to_v4(mode >> 6), flags, B_FALSE, cr));
}
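/*
 * For example (assuming the conventional zfs_unix_to_v4() mapping of the
 * owner rwx bits to ACE_READ_DATA, ACE_WRITE_DATA and ACE_EXECUTE), a
 * request for 0600 access is shifted down to 06 and checked as
 * ACE_READ_DATA|ACE_WRITE_DATA by zfs_zaccess().
 */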
/*
* Access function for secpolicy_vnode_setattr
*/
int
zfs_zaccess_unix(znode_t *zp, mode_t mode, cred_t *cr)
{
int v4_mode = zfs_unix_to_v4(mode >> 6);
return (zfs_zaccess(zp, v4_mode, 0, B_FALSE, cr));
}
/* See zfs_zaccess_delete() */
int zfs_write_implies_delete_child = 1;
/*
* Determine whether delete access should be granted.
*
* The following chart outlines how we handle delete permissions, which is
* how recent versions of Windows (Windows 2008) handle it. The efficiency
* comes from not having to check the parent ACL where the object itself grants
* delete:
*
* -------------------------------------------------------
* | Parent Dir | Target Object Permissions |
* | permissions | |
* -------------------------------------------------------
* | | ACL Allows | ACL Denies| Delete |
* | | Delete | Delete | unspecified|
* -------------------------------------------------------
* | ACL Allows | Permit | Deny * | Permit |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL Denies | Permit | Deny | Deny |
* | DELETE_CHILD | | | |
* -------------------------------------------------------
* | ACL specifies | | | |
* | only allow | Permit | Deny * | Permit |
* | write and | | | |
* | execute | | | |
* -------------------------------------------------------
* | ACL denies | | | |
* | write and | Permit | Deny | Deny |
* | execute | | | |
* -------------------------------------------------------
* ^
* |
* Re. execute permission on the directory: if that's missing,
* the vnode lookup of the target will fail before we get here.
*
* Re [*] in the table above: NFSv4 would normally Permit delete for
* these two cells of the matrix.
* See acl.h for notes on which ACE_... flags should be checked for which
* operations. Specifically, the NFSv4 committee recommendation is in
* conflict with the Windows interpretation of DENY ACEs, where DENY ACEs
* should take precedence over ALLOW ACEs.
*
* This implementation always consults the target object's ACL first.
* If a DENY ACE is present on the target object that specifies ACE_DELETE,
* delete access is denied. If an ALLOW ACE with ACE_DELETE is present on
* the target object, access is allowed. If and only if no entries with
* ACE_DELETE are present in the object's ACL, check the container's ACL
* for entries with ACE_DELETE_CHILD.
*
* A summary of the logic implemented from the table above is as follows:
*
* First check for DENY ACEs that apply.
* If either target or container has a deny, EACCES.
*
* Delete access can then be summarized as follows:
* 1: The object to be deleted grants ACE_DELETE, or
* 2: The containing directory grants ACE_DELETE_CHILD.
* In a Windows system, that would be the end of the story.
* In this system, (2) has some complications...
* 2a: "sticky" bit on a directory adds restrictions, and
* 2b: existing ACEs from previous versions of ZFS may
* not carry ACE_DELETE_CHILD where they should, so we
* also allow delete when ACE_WRITE_DATA is granted.
*
* Note: 2b is technically a work-around for a prior bug,
* which hopefully can go away some day. For those who
* no longer need the work around, and for testing, this
* work-around is made conditional via the tunable:
* zfs_write_implies_delete_child
*/
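/*
 * Illustrative walk-through of the cases above (hypothetical ACLs):
 * if the target file carries an ALLOW entry for delete, Case 1 applies
 * and the parent's ACL is never consulted. If the target is silent on
 * delete but the parent grants delete_child (or write_data, when
 * zfs_write_implies_delete_child is set), Case 2 grants the delete,
 * subject to the sticky-bit restrictions checked at the end.
 */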
int
zfs_zaccess_delete(znode_t *dzp, znode_t *zp, cred_t *cr)
{
uint32_t wanted_dirperms;
uint32_t dzp_working_mode = 0;
uint32_t zp_working_mode = 0;
int dzp_error, zp_error;
boolean_t dzpcheck_privs;
boolean_t zpcheck_privs;
if (zp->z_pflags & (ZFS_IMMUTABLE | ZFS_NOUNLINK))
return (SET_ERROR(EPERM));
/*
* Case 1:
* If target object grants ACE_DELETE then we are done. This is
* indicated by a return value of 0. For this case we don't worry
* about the sticky bit because sticky only applies to the parent
* directory and this is the child access result.
*
* If we encounter a DENY ACE here, we're also done (EACCES).
* Note that if we hit a DENY ACE here (on the target) it should
* take precedence over a DENY ACE on the container, so that when
* we have more complete auditing support we will be able to
* report an access failure against the specific target.
* (This is part of why we're checking the target first.)
*/
zp_error = zfs_zaccess_common(zp, ACE_DELETE, &zp_working_mode,
&zpcheck_privs, B_FALSE, cr);
if (zp_error == EACCES) {
/* We hit a DENY ACE. */
if (!zpcheck_privs)
return (SET_ERROR(zp_error));
return (secpolicy_vnode_remove(cr));
}
if (zp_error == 0)
return (0);
/*
* Case 2:
* If the containing directory grants ACE_DELETE_CHILD,
* or we're in backward compatibility mode and the
* containing directory has ACE_WRITE_DATA, allow.
* Case 2b is handled with wanted_dirperms.
*/
wanted_dirperms = ACE_DELETE_CHILD;
if (zfs_write_implies_delete_child)
wanted_dirperms |= ACE_WRITE_DATA;
dzp_error = zfs_zaccess_common(dzp, wanted_dirperms,
&dzp_working_mode, &dzpcheck_privs, B_FALSE, cr);
if (dzp_error == EACCES) {
/* We hit a DENY ACE. */
if (!dzpcheck_privs)
return (SET_ERROR(dzp_error));
return (secpolicy_vnode_remove(cr));
}
/*
* Cases 2a, 2b (continued)
*
* Note: dzp_working_mode now contains any permissions
* that were NOT granted. Therefore, if any of the
* wanted_dirperms WERE granted, we will have:
* dzp_working_mode != wanted_dirperms
* We're really asking if ANY of those permissions
* were granted, and if so, grant delete access.
*/
if (dzp_working_mode != wanted_dirperms)
dzp_error = 0;
/*
* dzp_error is 0 if the container granted us permissions to "modify".
* If we do not have permission via one or more ACEs, our current
* privileges may still permit us to modify the container.
*
* dzpcheck_privs is false when, e.g., the FS is read-only.
* Otherwise, do privilege checks for the container.
*/
if (dzp_error != 0 && dzpcheck_privs) {
uid_t owner;
/*
* The secpolicy call needs the requested access and
* the current access mode of the container, but it
* only knows about Unix-style modes (VEXEC, VWRITE),
* so this must condense the fine-grained ACE bits into
* Unix modes.
*
* The VEXEC flag is easy, because we know that has
* always been checked before we get here (during the
* lookup of the target vnode). The container has not
* granted us permissions to "modify", so we do not set
* the VWRITE flag in the current access mode.
*/
owner = zfs_fuid_map_id(ZTOZSB(dzp),
KUID_TO_SUID(ZTOI(dzp)->i_uid), cr, ZFS_OWNER);
dzp_error = secpolicy_vnode_access2(cr, ZTOI(dzp),
owner, S_IXUSR, S_IWUSR|S_IXUSR);
}
if (dzp_error != 0) {
/*
* Note: We may have dzp_error = -1 here (from
* zfs_zaccess_common). Don't return that.
*/
return (SET_ERROR(EACCES));
}
/*
* At this point, we know that the directory permissions allow
* us to modify, but we still need to check for the additional
* restrictions that apply when the "sticky bit" is set.
*
* Yes, zfs_sticky_remove_access() also checks this bit, but
* checking it here and skipping the call below is nice when
* you're watching all of this with dtrace.
*/
if ((dzp->z_mode & S_ISVTX) == 0)
return (0);
/*
* zfs_sticky_remove_access will succeed if:
* 1. The sticky bit is absent.
* 2. We pass the sticky bit restrictions.
* 3. We have privileges that always allow file removal.
*/
return (zfs_sticky_remove_access(dzp, zp, cr));
}
int
zfs_zaccess_rename(znode_t *sdzp, znode_t *szp, znode_t *tdzp,
znode_t *tzp, cred_t *cr)
{
int add_perm;
int error;
if (szp->z_pflags & ZFS_AV_QUARANTINED)
return (SET_ERROR(EACCES));
add_perm = S_ISDIR(ZTOI(szp)->i_mode) ?
ACE_ADD_SUBDIRECTORY : ACE_ADD_FILE;
/*
* Rename permissions are a combination of delete permission +
* add file/subdir permission.
*/
/*
* First make sure we do the delete portion.
*
* If that succeeds then check for add_file/add_subdir permissions.
*/
if ((error = zfs_zaccess_delete(sdzp, szp, cr)))
return (error);
/*
* If we have a tzp, see if we can delete it.
*/
if (tzp) {
if ((error = zfs_zaccess_delete(tdzp, tzp, cr)))
return (error);
}
/*
* Now check for add permissions
*/
error = zfs_zaccess(tdzp, add_perm, 0, B_FALSE, cr);
return (error);
}
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
index 6859832ab81c..859c51baffd8 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zfs_znode.c
@@ -1,2255 +1,2255 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2018 by Delphix. All rights reserved.
*/
/* Portions Copyright 2007 Jeremy Teo */
#ifdef _KERNEL
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/sysmacros.h>
#include <sys/mntent.h>
#include <sys/u8_textprep.h>
#include <sys/dsl_dataset.h>
#include <sys/vfs.h>
#include <sys/vnode.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/errno.h>
#include <sys/atomic.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_rlock.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/zfs_ctldir.h>
#include <sys/dnode.h>
#include <sys/fs/zfs.h>
#include <sys/zpl.h>
#endif /* _KERNEL */
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/zfs_refcount.h>
#include <sys/stat.h>
#include <sys/zap.h>
#include <sys/zfs_znode.h>
#include <sys/sa.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_stat.h>
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* Functions needed for userland (i.e., libzpool) are not put under
* #ifdef _KERNEL; the rest of the functions have dependencies
* (such as VFS logic) that will not compile easily in userland.
*/
#ifdef _KERNEL
static kmem_cache_t *znode_cache = NULL;
static kmem_cache_t *znode_hold_cache = NULL;
unsigned int zfs_object_mutex_size = ZFS_OBJ_MTX_SZ;
/*
* This is used by the test suite so that it can delay znodes from being
* freed in order to inspect the unlinked set.
*/
int zfs_unlink_suspend_progress = 0;
/*
* This callback is invoked when acquiring a RL_WRITER or RL_APPEND lock on
* z_rangelock. It will modify the offset and length of the lock to reflect
* znode-specific information, and convert RL_APPEND to RL_WRITER. This is
* called with the rangelock_t's rl_lock held, which avoids races.
*/
static void
zfs_rangelock_cb(zfs_locked_range_t *new, void *arg)
{
znode_t *zp = arg;
/*
* If in append mode, convert to writer and lock starting at the
* current end of file.
*/
if (new->lr_type == RL_APPEND) {
new->lr_offset = zp->z_size;
new->lr_type = RL_WRITER;
}
/*
* If we need to grow the block size then lock the whole file range.
*/
uint64_t end_size = MAX(zp->z_size, new->lr_offset + new->lr_length);
if (end_size > zp->z_blksz && (!ISP2(zp->z_blksz) ||
zp->z_blksz < ZTOZSB(zp)->z_max_blksz)) {
new->lr_offset = 0;
new->lr_length = UINT64_MAX;
}
}
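/*
 * Illustrative effect of the callback above (hypothetical numbers):
 * an RL_APPEND lock on a 4 KiB file becomes an RL_WRITER lock starting
 * at offset 4096, and if the resulting end of range would outgrow
 * z_blksz (and the block size can still grow), the lock is widened to
 * cover the whole file (offset 0, length UINT64_MAX).
 */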
/*ARGSUSED*/
static int
zfs_znode_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_t *zp = buf;
inode_init_once(ZTOI(zp));
list_link_init(&zp->z_link_node);
mutex_init(&zp->z_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_parent_lock, NULL, RW_DEFAULT, NULL);
rw_init(&zp->z_name_lock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&zp->z_acl_lock, NULL, MUTEX_DEFAULT, NULL);
rw_init(&zp->z_xattr_lock, NULL, RW_DEFAULT, NULL);
zfs_rangelock_init(&zp->z_rangelock, zfs_rangelock_cb, zp);
zp->z_dirlocks = NULL;
zp->z_acl_cached = NULL;
zp->z_xattr_cached = NULL;
zp->z_xattr_parent = 0;
return (0);
}
/*ARGSUSED*/
static void
zfs_znode_cache_destructor(void *buf, void *arg)
{
znode_t *zp = buf;
ASSERT(!list_link_active(&zp->z_link_node));
mutex_destroy(&zp->z_lock);
rw_destroy(&zp->z_parent_lock);
rw_destroy(&zp->z_name_lock);
mutex_destroy(&zp->z_acl_lock);
rw_destroy(&zp->z_xattr_lock);
zfs_rangelock_fini(&zp->z_rangelock);
ASSERT3P(zp->z_dirlocks, ==, NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
}
static int
zfs_znode_hold_cache_constructor(void *buf, void *arg, int kmflags)
{
znode_hold_t *zh = buf;
mutex_init(&zh->zh_lock, NULL, MUTEX_DEFAULT, NULL);
zfs_refcount_create(&zh->zh_refcount);
zh->zh_obj = ZFS_NO_OBJECT;
return (0);
}
static void
zfs_znode_hold_cache_destructor(void *buf, void *arg)
{
znode_hold_t *zh = buf;
mutex_destroy(&zh->zh_lock);
zfs_refcount_destroy(&zh->zh_refcount);
}
void
zfs_znode_init(void)
{
/*
* Initialize zcache. The KMC_SLAB hint is used so that the cache is
* backed by kmalloc() on the Linux slab, which ensures that any
* wait_on_bit() operations on the related inode operate properly.
*/
ASSERT(znode_cache == NULL);
znode_cache = kmem_cache_create("zfs_znode_cache",
sizeof (znode_t), 0, zfs_znode_cache_constructor,
zfs_znode_cache_destructor, NULL, NULL, NULL, KMC_SLAB);
ASSERT(znode_hold_cache == NULL);
znode_hold_cache = kmem_cache_create("zfs_znode_hold_cache",
sizeof (znode_hold_t), 0, zfs_znode_hold_cache_constructor,
zfs_znode_hold_cache_destructor, NULL, NULL, NULL, 0);
}
void
zfs_znode_fini(void)
{
/*
* Cleanup zcache
*/
if (znode_cache)
kmem_cache_destroy(znode_cache);
znode_cache = NULL;
if (znode_hold_cache)
kmem_cache_destroy(znode_hold_cache);
znode_hold_cache = NULL;
}
/*
* The zfs_znode_hold_enter() / zfs_znode_hold_exit() functions are used to
* serialize access to a znode and its SA buffer while the object is being
* created or destroyed. This kind of locking would normally reside in the
* znode itself but in this case that's impossible because the znode and SA
* buffer may not yet exist. Therefore the locking is handled externally
* with an array of mutexes and AVL trees which contain per-object locks.
*
* In zfs_znode_hold_enter() a per-object lock is created as needed, inserted
* into the correct AVL tree and finally the per-object lock is held. In
* zfs_znode_hold_exit() the process is reversed. The per-object lock is
* released, removed from the AVL tree and destroyed if there are no waiters.
*
* This scheme has two important properties:
*
* 1) No memory allocations are performed while holding one of the z_hold_locks.
* This ensures evict(), which can be called from direct memory reclaim, will
* never block waiting on a z_hold_locks which just happens to have hashed
* to the same index.
*
* 2) All locks used to serialize access to an object are per-object and never
* shared. This minimizes lock contention without creating a large number
* of dedicated locks.
*
* On the downside it does require znode_hold_t structures to be frequently
* allocated and freed. However, because these are backed by a kmem cache
* and very short lived this cost is minimal.
*/
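/*
 * Typical usage, as a sketch (zfs_mknode() and zfs_zget() below are the
 * actual callers):
 *
 *	zh = zfs_znode_hold_enter(zfsvfs, obj);
 *	... create, look up, or destroy the znode and its SA buffer ...
 *	zfs_znode_hold_exit(zfsvfs, zh);
 */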
int
zfs_znode_hold_compare(const void *a, const void *b)
{
const znode_hold_t *zh_a = (const znode_hold_t *)a;
const znode_hold_t *zh_b = (const znode_hold_t *)b;
return (TREE_CMP(zh_a->zh_obj, zh_b->zh_obj));
}
static boolean_t __maybe_unused
zfs_znode_held(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t held;
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
held = (zh && MUTEX_HELD(&zh->zh_lock)) ? B_TRUE : B_FALSE;
mutex_exit(&zfsvfs->z_hold_locks[i]);
return (held);
}
static znode_hold_t *
zfs_znode_hold_enter(zfsvfs_t *zfsvfs, uint64_t obj)
{
znode_hold_t *zh, *zh_new, search;
int i = ZFS_OBJ_HASH(zfsvfs, obj);
boolean_t found = B_FALSE;
zh_new = kmem_cache_alloc(znode_hold_cache, KM_SLEEP);
zh_new->zh_obj = obj;
search.zh_obj = obj;
mutex_enter(&zfsvfs->z_hold_locks[i]);
zh = avl_find(&zfsvfs->z_hold_trees[i], &search, NULL);
if (likely(zh == NULL)) {
zh = zh_new;
avl_add(&zfsvfs->z_hold_trees[i], zh);
} else {
ASSERT3U(zh->zh_obj, ==, obj);
found = B_TRUE;
}
zfs_refcount_add(&zh->zh_refcount, NULL);
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (found == B_TRUE)
kmem_cache_free(znode_hold_cache, zh_new);
ASSERT(MUTEX_NOT_HELD(&zh->zh_lock));
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_enter(&zh->zh_lock);
return (zh);
}
static void
zfs_znode_hold_exit(zfsvfs_t *zfsvfs, znode_hold_t *zh)
{
int i = ZFS_OBJ_HASH(zfsvfs, zh->zh_obj);
boolean_t remove = B_FALSE;
ASSERT(zfs_znode_held(zfsvfs, zh->zh_obj));
ASSERT3S(zfs_refcount_count(&zh->zh_refcount), >, 0);
mutex_exit(&zh->zh_lock);
mutex_enter(&zfsvfs->z_hold_locks[i]);
if (zfs_refcount_remove(&zh->zh_refcount, NULL) == 0) {
avl_remove(&zfsvfs->z_hold_trees[i], zh);
remove = B_TRUE;
}
mutex_exit(&zfsvfs->z_hold_locks[i]);
if (remove == B_TRUE)
kmem_cache_free(znode_hold_cache, zh);
}
dev_t
zfs_cmpldev(uint64_t dev)
{
return (dev);
}
static void
zfs_znode_sa_init(zfsvfs_t *zfsvfs, znode_t *zp,
dmu_buf_t *db, dmu_object_type_t obj_type, sa_handle_t *sa_hdl)
{
ASSERT(zfs_znode_held(zfsvfs, zp->z_id));
mutex_enter(&zp->z_lock);
ASSERT(zp->z_sa_hdl == NULL);
ASSERT(zp->z_acl_cached == NULL);
if (sa_hdl == NULL) {
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, zp,
SA_HDL_SHARED, &zp->z_sa_hdl));
} else {
zp->z_sa_hdl = sa_hdl;
sa_set_userp(sa_hdl, zp);
}
zp->z_is_sa = (obj_type == DMU_OT_SA) ? B_TRUE : B_FALSE;
mutex_exit(&zp->z_lock);
}
void
zfs_znode_dmu_fini(znode_t *zp)
{
ASSERT(zfs_znode_held(ZTOZSB(zp), zp->z_id) || zp->z_unlinked ||
RW_WRITE_HELD(&ZTOZSB(zp)->z_teardown_inactive_lock));
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
}
/*
* Called by new_inode() to allocate a new inode.
*/
int
zfs_inode_alloc(struct super_block *sb, struct inode **ip)
{
znode_t *zp;
zp = kmem_cache_alloc(znode_cache, KM_SLEEP);
*ip = ZTOI(zp);
return (0);
}
/*
* Called in multiple places when an inode should be destroyed.
*/
void
zfs_inode_destroy(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
mutex_enter(&zfsvfs->z_znodes_lock);
if (list_link_active(&zp->z_link_node)) {
list_remove(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes--;
}
mutex_exit(&zfsvfs->z_znodes_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
kmem_cache_free(znode_cache, zp);
}
static void
zfs_inode_set_ops(zfsvfs_t *zfsvfs, struct inode *ip)
{
uint64_t rdev = 0;
switch (ip->i_mode & S_IFMT) {
case S_IFREG:
ip->i_op = &zpl_inode_operations;
ip->i_fop = &zpl_file_operations;
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
case S_IFDIR:
ip->i_op = &zpl_dir_inode_operations;
ip->i_fop = &zpl_dir_file_operations;
ITOZ(ip)->z_zn_prefetch = B_TRUE;
break;
case S_IFLNK:
ip->i_op = &zpl_symlink_inode_operations;
break;
/*
* rdev is only stored in a SA for device files.
*/
case S_IFCHR:
case S_IFBLK:
(void) sa_lookup(ITOZ(ip)->z_sa_hdl, SA_ZPL_RDEV(zfsvfs), &rdev,
sizeof (rdev));
- /* FALLTHROUGH */
+ fallthrough;
case S_IFIFO:
case S_IFSOCK:
init_special_inode(ip, ip->i_mode, rdev);
ip->i_op = &zpl_special_inode_operations;
break;
default:
zfs_panic_recover("inode %llu has invalid mode: 0x%x\n",
(u_longlong_t)ip->i_ino, ip->i_mode);
/* Assume the inode is a file and attempt to continue */
ip->i_mode = S_IFREG | 0644;
ip->i_op = &zpl_inode_operations;
ip->i_fop = &zpl_file_operations;
ip->i_mapping->a_ops = &zpl_address_space_operations;
break;
}
}
static void
zfs_set_inode_flags(znode_t *zp, struct inode *ip)
{
/*
* Linux and Solaris have different sets of file attributes, so we
* restrict this conversion to the intersection of the two.
*/
#ifdef HAVE_INODE_SET_FLAGS
unsigned int flags = 0;
if (zp->z_pflags & ZFS_IMMUTABLE)
flags |= S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
flags |= S_APPEND;
inode_set_flags(ip, flags, S_IMMUTABLE|S_APPEND);
#else
if (zp->z_pflags & ZFS_IMMUTABLE)
ip->i_flags |= S_IMMUTABLE;
else
ip->i_flags &= ~S_IMMUTABLE;
if (zp->z_pflags & ZFS_APPENDONLY)
ip->i_flags |= S_APPEND;
else
ip->i_flags &= ~S_APPEND;
#endif
}
/*
* Update the embedded inode given the znode.
*/
void
zfs_znode_update_vfs(znode_t *zp)
{
zfsvfs_t *zfsvfs;
struct inode *ip;
uint32_t blksize;
u_longlong_t i_blocks;
ASSERT(zp != NULL);
zfsvfs = ZTOZSB(zp);
ip = ZTOI(zp);
/* Skip .zfs control nodes which do not exist on disk. */
if (zfsctl_is_node(ip))
return;
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &blksize, &i_blocks);
spin_lock(&ip->i_lock);
ip->i_mode = zp->z_mode;
ip->i_blocks = i_blocks;
i_size_write(ip, zp->z_size);
spin_unlock(&ip->i_lock);
}
/*
* Construct a znode+inode and initialize.
*
* This does not call dmu_set_user(); that is
* up to the caller to do, in case you don't want to
* return the znode.
*/
static znode_t *
zfs_znode_alloc(zfsvfs_t *zfsvfs, dmu_buf_t *db, int blksz,
dmu_object_type_t obj_type, sa_handle_t *hdl)
{
znode_t *zp;
struct inode *ip;
uint64_t mode;
uint64_t parent;
uint64_t tmp_gen;
uint64_t links;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
sa_bulk_attr_t bulk[12];
int count = 0;
ASSERT(zfsvfs != NULL);
ip = new_inode(zfsvfs->z_sb);
if (ip == NULL)
return (NULL);
zp = ITOZ(ip);
ASSERT(zp->z_dirlocks == NULL);
ASSERT3P(zp->z_acl_cached, ==, NULL);
ASSERT3P(zp->z_xattr_cached, ==, NULL);
zp->z_unlinked = B_FALSE;
zp->z_atime_dirty = B_FALSE;
zp->z_is_mapped = B_FALSE;
zp->z_is_ctldir = B_FALSE;
zp->z_is_stale = B_FALSE;
zp->z_suspended = B_FALSE;
zp->z_sa_hdl = NULL;
zp->z_mapcnt = 0;
zp->z_id = db->db_object;
zp->z_blksz = blksz;
zp->z_seq = 0x7A4653;
zp->z_sync_cnt = 0;
zfs_znode_sa_init(zfsvfs, zp, db, obj_type, hdl);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL, &tmp_gen, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PARENT(zfsvfs), NULL,
&parent, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL, &z_uid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL, &z_gid, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count) != 0 || tmp_gen == 0 ||
(dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
(zp->z_pflags & ZFS_PROJID) &&
sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs), &projid, 8) != 0)) {
if (hdl == NULL)
sa_handle_destroy(zp->z_sa_hdl);
zp->z_sa_hdl = NULL;
goto error;
}
zp->z_projid = projid;
zp->z_mode = ip->i_mode = mode;
ip->i_generation = (uint32_t)tmp_gen;
ip->i_blkbits = SPA_MINBLOCKSHIFT;
set_nlink(ip, (uint32_t)links);
zfs_uid_write(ip, z_uid);
zfs_gid_write(ip, z_gid);
zfs_set_inode_flags(zp, ip);
/* Cache the xattr parent id */
if (zp->z_pflags & ZFS_XATTR)
zp->z_xattr_parent = parent;
ZFS_TIME_DECODE(&ip->i_atime, atime);
ZFS_TIME_DECODE(&ip->i_mtime, mtime);
ZFS_TIME_DECODE(&ip->i_ctime, ctime);
ZFS_TIME_DECODE(&zp->z_btime, btime);
ip->i_ino = zp->z_id;
zfs_znode_update_vfs(zp);
zfs_inode_set_ops(zfsvfs, ip);
/*
* The only way insert_inode_locked() can fail is if the ip->i_ino
* number is already hashed for this super block. This can never
* happen because the inode numbers map 1:1 with the object numbers.
*
* Exceptions include rolling back a mounted file system, either
* from the zfs rollback or zfs recv command.
*
* Active inodes are unhashed during the rollback, but since zrele
* can happen asynchronously, we can't guarantee they've been
* unhashed. This can cause hash collisions in unlinked drain
* processing so do not hash unlinked znodes.
*/
if (links > 0)
VERIFY3S(insert_inode_locked(ip), ==, 0);
mutex_enter(&zfsvfs->z_znodes_lock);
list_insert_tail(&zfsvfs->z_all_znodes, zp);
zfsvfs->z_nr_znodes++;
mutex_exit(&zfsvfs->z_znodes_lock);
if (links > 0)
unlock_new_inode(ip);
return (zp);
error:
iput(ip);
return (NULL);
}
/*
* Safely mark an inode dirty. Inodes which are part of a read-only
* file system or snapshot may not be dirtied.
*/
void
zfs_mark_inode_dirty(struct inode *ip)
{
zfsvfs_t *zfsvfs = ITOZSB(ip);
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return;
mark_inode_dirty(ip);
}
static uint64_t empty_xattr;
static uint64_t pad[4];
static zfs_acl_phys_t acl_phys;
/*
* Create a new DMU object to hold a zfs znode.
*
* IN: dzp - parent directory for new znode
* vap - file attributes for new znode
* tx - dmu transaction id for zap operations
* cr - credentials of caller
* flag - flags:
* IS_ROOT_NODE - new object will be root
* IS_TMPFILE - new object is of O_TMPFILE
* IS_XATTR - new object is an attribute
* acl_ids - ACL related attributes
*
* OUT: zpp - allocated znode (set to dzp if IS_ROOT_NODE)
*
*/
void
zfs_mknode(znode_t *dzp, vattr_t *vap, dmu_tx_t *tx, cred_t *cr,
uint_t flag, znode_t **zpp, zfs_acl_ids_t *acl_ids)
{
uint64_t crtime[2], atime[2], mtime[2], ctime[2];
uint64_t mode, size, links, parent, pflags;
uint64_t projid = ZFS_DEFAULT_PROJID;
uint64_t rdev = 0;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
dmu_buf_t *db;
inode_timespec_t now;
uint64_t gen, obj;
int bonuslen;
int dnodesize;
sa_handle_t *sa_hdl;
dmu_object_type_t obj_type;
sa_bulk_attr_t *sa_attrs;
int cnt = 0;
zfs_acl_locator_cb_t locate = { 0 };
znode_hold_t *zh;
if (zfsvfs->z_replay) {
obj = vap->va_nodeid;
now = vap->va_ctime; /* see zfs_replay_create() */
gen = vap->va_nblocks; /* ditto */
dnodesize = vap->va_fsid; /* ditto */
} else {
obj = 0;
gethrestime(&now);
gen = dmu_tx_get_txg(tx);
dnodesize = dmu_objset_dnodesize(zfsvfs->z_os);
}
if (dnodesize == 0)
dnodesize = DNODE_MIN_SIZE;
obj_type = zfsvfs->z_use_sa ? DMU_OT_SA : DMU_OT_ZNODE;
bonuslen = (obj_type == DMU_OT_SA) ?
DN_BONUS_SIZE(dnodesize) : ZFS_OLD_ZNODE_PHYS_SIZE;
/*
* Create a new DMU object.
*/
/*
* There's currently no mechanism for pre-reading the blocks that will
* be needed to allocate a new object, so we accept the small chance
* that there will be an i/o error and we will fail one of the
* assertions below.
*/
if (S_ISDIR(vap->va_mode)) {
if (zfsvfs->z_replay) {
VERIFY0(zap_create_claim_norm_dnsize(zfsvfs->z_os, obj,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = zap_create_norm_dnsize(zfsvfs->z_os,
zfsvfs->z_norm, DMU_OT_DIRECTORY_CONTENTS,
obj_type, bonuslen, dnodesize, tx);
}
} else {
if (zfsvfs->z_replay) {
VERIFY0(dmu_object_claim_dnsize(zfsvfs->z_os, obj,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx));
} else {
obj = dmu_object_alloc_dnsize(zfsvfs->z_os,
DMU_OT_PLAIN_FILE_CONTENTS, 0,
obj_type, bonuslen, dnodesize, tx);
}
}
zh = zfs_znode_hold_enter(zfsvfs, obj);
VERIFY0(sa_buf_hold(zfsvfs->z_os, obj, NULL, &db));
/*
* If this is the root, fix up the half-initialized parent pointer
* to reference the just-allocated physical data area.
*/
if (flag & IS_ROOT_NODE) {
dzp->z_id = obj;
}
/*
* If parent is an xattr, so am I.
*/
if (dzp->z_pflags & ZFS_XATTR) {
flag |= IS_XATTR;
}
if (zfsvfs->z_use_fuids)
pflags = ZFS_ARCHIVE | ZFS_AV_MODIFIED;
else
pflags = 0;
if (S_ISDIR(vap->va_mode)) {
size = 2; /* contents ("." and "..") */
links = 2;
} else {
size = 0;
links = (flag & IS_TMPFILE) ? 0 : 1;
}
if (S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))
rdev = vap->va_rdev;
parent = dzp->z_id;
mode = acl_ids->z_mode;
if (flag & IS_XATTR)
pflags |= ZFS_XATTR;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode)) {
/*
* With ZFS_PROJID flag, we can easily know whether there is
* project ID stored on disk or not. See zfs_space_delta_cb().
*/
if (obj_type != DMU_OT_ZNODE &&
dmu_objset_projectquota_enabled(zfsvfs->z_os))
pflags |= ZFS_PROJID;
/*
* Inherit project ID from parent if required.
*/
projid = zfs_inherit_projid(dzp);
if (dzp->z_pflags & ZFS_PROJINHERIT)
pflags |= ZFS_PROJINHERIT;
}
/*
* No execs denied will be determined when zfs_mode_compute() is called.
*/
pflags |= acl_ids->z_aclp->z_hints &
(ZFS_ACL_TRIVIAL|ZFS_INHERIT_ACE|ZFS_ACL_AUTO_INHERIT|
ZFS_ACL_DEFAULTED|ZFS_ACL_PROTECTED);
ZFS_TIME_ENCODE(&now, crtime);
ZFS_TIME_ENCODE(&now, ctime);
if (vap->va_mask & ATTR_ATIME) {
ZFS_TIME_ENCODE(&vap->va_atime, atime);
} else {
ZFS_TIME_ENCODE(&now, atime);
}
if (vap->va_mask & ATTR_MTIME) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
} else {
ZFS_TIME_ENCODE(&now, mtime);
}
/* Now add in all of the "SA" attributes */
VERIFY(0 == sa_handle_get_from_db(zfsvfs->z_os, db, NULL, SA_HDL_SHARED,
&sa_hdl));
/*
* Set up the array of attributes to be replaced/set on the new file.
*
* The order for DMU_OT_ZNODE is critical since it needs to be constructed
* in the old znode_phys_t format. Don't change this ordering.
*/
sa_attrs = kmem_alloc(sizeof (sa_bulk_attr_t) * ZPL_END, KM_SLEEP);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
} else {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MODE(zfsvfs),
NULL, &mode, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_SIZE(zfsvfs),
NULL, &size, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GEN(zfsvfs),
NULL, &gen, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs),
NULL, &acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs),
NULL, &acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PARENT(zfsvfs),
NULL, &parent, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ATIME(zfsvfs),
NULL, &atime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_MTIME(zfsvfs),
NULL, &mtime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CTIME(zfsvfs),
NULL, &ctime, 16);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_CRTIME(zfsvfs),
NULL, &crtime, 16);
}
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_LINKS(zfsvfs), NULL, &links, 8);
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_XATTR(zfsvfs), NULL,
&empty_xattr, 8);
} else if (dmu_objset_projectquota_enabled(zfsvfs->z_os) &&
pflags & ZFS_PROJID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PROJID(zfsvfs),
NULL, &projid, 8);
}
if (obj_type == DMU_OT_ZNODE ||
(S_ISBLK(vap->va_mode) || S_ISCHR(vap->va_mode))) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_RDEV(zfsvfs),
NULL, &rdev, 8);
}
if (obj_type == DMU_OT_ZNODE) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_FLAGS(zfsvfs),
NULL, &pflags, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_UID(zfsvfs), NULL,
&acl_ids->z_fuid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_GID(zfsvfs), NULL,
&acl_ids->z_fgid, 8);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_PAD(zfsvfs), NULL, pad,
sizeof (uint64_t) * 4);
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_ZNODE_ACL(zfsvfs), NULL,
&acl_phys, sizeof (zfs_acl_phys_t));
} else if (acl_ids->z_aclp->z_version >= ZFS_ACL_VERSION_FUID) {
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_COUNT(zfsvfs), NULL,
&acl_ids->z_aclp->z_acl_count, 8);
locate.cb_aclp = acl_ids->z_aclp;
SA_ADD_BULK_ATTR(sa_attrs, cnt, SA_ZPL_DACL_ACES(zfsvfs),
zfs_acl_data_locator, &locate,
acl_ids->z_aclp->z_acl_bytes);
mode = zfs_mode_compute(mode, acl_ids->z_aclp, &pflags,
acl_ids->z_fuid, acl_ids->z_fgid);
}
VERIFY(sa_replace_all_by_template(sa_hdl, sa_attrs, cnt, tx) == 0);
if (!(flag & IS_ROOT_NODE)) {
/*
* The call to zfs_znode_alloc() may fail if memory is low
* via the call path: alloc_inode() -> inode_init_always() ->
* security_inode_alloc() -> inode_alloc_security(). Since
* the existing code is written such that zfs_mknode() cannot
* fail, retry until sufficient memory has been reclaimed.
*/
do {
*zpp = zfs_znode_alloc(zfsvfs, db, 0, obj_type, sa_hdl);
} while (*zpp == NULL);
VERIFY(*zpp != NULL);
VERIFY(dzp != NULL);
} else {
/*
* If we are creating the root node, the "parent" we
* passed in is the znode for the root.
*/
*zpp = dzp;
(*zpp)->z_sa_hdl = sa_hdl;
}
(*zpp)->z_pflags = pflags;
(*zpp)->z_mode = ZTOI(*zpp)->i_mode = mode;
(*zpp)->z_dnodesize = dnodesize;
(*zpp)->z_projid = projid;
if (obj_type == DMU_OT_ZNODE ||
acl_ids->z_aclp->z_version < ZFS_ACL_VERSION_FUID) {
VERIFY0(zfs_aclset_common(*zpp, acl_ids->z_aclp, cr, tx));
}
kmem_free(sa_attrs, sizeof (sa_bulk_attr_t) * ZPL_END);
zfs_znode_hold_exit(zfsvfs, zh);
}
/*
* Update in-core attributes. It is assumed the caller will be doing an
* sa_bulk_update to push the changes out.
*/
void
zfs_xvattr_set(znode_t *zp, xvattr_t *xvap, dmu_tx_t *tx)
{
xoptattr_t *xoap;
boolean_t update_inode = B_FALSE;
xoap = xva_getxoptattr(xvap);
ASSERT(xoap);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
ZFS_TIME_ENCODE(&xoap->xoa_createtime, times);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_CRTIME(ZTOZSB(zp)),
&times, sizeof (times), tx);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
ZFS_ATTR_SET(zp, ZFS_READONLY, xoap->xoa_readonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
ZFS_ATTR_SET(zp, ZFS_HIDDEN, xoap->xoa_hidden,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
ZFS_ATTR_SET(zp, ZFS_SYSTEM, xoap->xoa_system,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
ZFS_ATTR_SET(zp, ZFS_ARCHIVE, xoap->xoa_archive,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
ZFS_ATTR_SET(zp, ZFS_IMMUTABLE, xoap->xoa_immutable,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
ZFS_ATTR_SET(zp, ZFS_NOUNLINK, xoap->xoa_nounlink,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
ZFS_ATTR_SET(zp, ZFS_APPENDONLY, xoap->xoa_appendonly,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
update_inode = B_TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
ZFS_ATTR_SET(zp, ZFS_NODUMP, xoap->xoa_nodump,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
ZFS_ATTR_SET(zp, ZFS_OPAQUE, xoap->xoa_opaque,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
ZFS_ATTR_SET(zp, ZFS_AV_QUARANTINED,
xoap->xoa_av_quarantined, zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
ZFS_ATTR_SET(zp, ZFS_AV_MODIFIED, xoap->xoa_av_modified,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
zfs_sa_set_scanstamp(zp, xvap, tx);
XVA_SET_RTN(xvap, XAT_AV_SCANSTAMP);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
ZFS_ATTR_SET(zp, ZFS_REPARSE, xoap->xoa_reparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
ZFS_ATTR_SET(zp, ZFS_OFFLINE, xoap->xoa_offline,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
ZFS_ATTR_SET(zp, ZFS_SPARSE, xoap->xoa_sparse,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
ZFS_ATTR_SET(zp, ZFS_PROJINHERIT, xoap->xoa_projinherit,
zp->z_pflags, tx);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (update_inode)
zfs_set_inode_flags(zp, ZTOI(zp));
}
int
zfs_zget(zfsvfs_t *zfsvfs, uint64_t obj_num, znode_t **zpp)
{
dmu_object_info_t doi;
dmu_buf_t *db;
znode_t *zp;
znode_hold_t *zh;
int err;
sa_handle_t *hdl;
*zpp = NULL;
again:
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
hdl = dmu_buf_get_user(db);
if (hdl != NULL) {
zp = sa_get_userdata(hdl);
/*
* Since "SA" does immediate eviction we
* should never find a sa handle that doesn't
* know about the znode.
*/
ASSERT3P(zp, !=, NULL);
mutex_enter(&zp->z_lock);
ASSERT3U(zp->z_id, ==, obj_num);
/*
* If zp->z_unlinked is set, the znode is already marked
* for deletion and should not be discovered. Check this
* after checking igrab() due to fsetxattr() & O_TMPFILE.
*
* If igrab() returns NULL the VFS has independently
* determined the inode should be evicted and has
* called iput_final() to start the eviction process.
* The SA handle is still valid but because the VFS
* requires that the eviction succeed we must drop
* our locks and references to allow the eviction to
* complete. The zfs_zget() may then be retried.
*
* This unlikely case could be optimized by registering
* a sops->drop_inode() callback. The callback would
* need to detect the active SA hold thereby informing
* the VFS that this inode should not be evicted.
*/
if (igrab(ZTOI(zp)) == NULL) {
if (zp->z_unlinked)
err = SET_ERROR(ENOENT);
else
err = SET_ERROR(EAGAIN);
} else {
*zpp = zp;
err = 0;
}
mutex_exit(&zp->z_lock);
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
if (err == EAGAIN) {
/* inode might need this to finish evict */
cond_resched();
goto again;
}
return (err);
}
/*
* Not found, create a new znode/vnode, but only if the file exists.
*
* There is a small window where zfs_vget() could
* find this object while a file create is still in
* progress. This is checked for in zfs_znode_alloc().
*
* If zfs_znode_alloc() fails it will drop the hold on the
* bonus buffer.
*/
zp = zfs_znode_alloc(zfsvfs, db, doi.doi_data_block_size,
doi.doi_bonus_type, NULL);
if (zp == NULL) {
err = SET_ERROR(ENOENT);
} else {
*zpp = zp;
}
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
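/*
 * Usage sketch (illustrative): a successful zfs_zget() returns a held
 * znode, so a typical caller releases it with zrele() when done, e.g.:
 *
 *   znode_t *zp;
 *   if (zfs_zget(zfsvfs, obj, &zp) == 0) {
 *           ... use zp ...
 *           zrele(zp);
 *   }
 *
 * The EAGAIN case above is retried internally, so callers never observe
 * EAGAIN from this function.
 */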
int
zfs_rezget(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_object_info_t doi;
dmu_buf_t *db;
uint64_t obj_num = zp->z_id;
uint64_t mode;
uint64_t links;
sa_bulk_attr_t bulk[11];
int err;
int count = 0;
uint64_t gen;
uint64_t z_uid, z_gid;
uint64_t atime[2], mtime[2], ctime[2], btime[2];
uint64_t projid = ZFS_DEFAULT_PROJID;
znode_hold_t *zh;
/*
* Skip ctldir znodes, otherwise they will always get invalidated. This
* will cause odd behaviour for mounted snapdirs. In particular, for
* Linux >= 3.18, d_invalidate will detach the mountpoint and prevent
* anyone from automounting it again as long as someone is still using the
* detached mount.
*/
if (zp->z_is_ctldir)
return (0);
zh = zfs_znode_hold_enter(zfsvfs, obj_num);
mutex_enter(&zp->z_acl_lock);
if (zp->z_acl_cached) {
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = NULL;
}
mutex_exit(&zp->z_acl_lock);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
if (zp->z_xattr_cached) {
nvlist_free(zp->z_xattr_cached);
zp->z_xattr_cached = NULL;
}
rw_exit(&zp->z_xattr_lock);
ASSERT(zp->z_sa_hdl == NULL);
err = sa_buf_hold(zfsvfs->z_os, obj_num, NULL, &db);
if (err) {
zfs_znode_hold_exit(zfsvfs, zh);
return (err);
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_type != DMU_OT_SA &&
(doi.doi_bonus_type != DMU_OT_ZNODE ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t)))) {
sa_buf_rele(db, NULL);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EINVAL));
}
zfs_znode_sa_init(zfsvfs, zp, db, doi.doi_bonus_type, NULL);
/* reload cached values */
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GEN(zfsvfs), NULL,
&gen, sizeof (gen));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, sizeof (zp->z_size));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs), NULL,
&links, sizeof (links));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&z_uid, sizeof (z_uid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&z_gid, sizeof (z_gid));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&mode, sizeof (mode));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
&mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
&ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CRTIME(zfsvfs), NULL, &btime, 16);
if (sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
if (dmu_objset_projectquota_enabled(zfsvfs->z_os)) {
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_PROJID(zfsvfs),
&projid, 8);
if (err != 0 && err != ENOENT) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(err));
}
}
zp->z_projid = projid;
zp->z_mode = ZTOI(zp)->i_mode = mode;
zfs_uid_write(ZTOI(zp), z_uid);
zfs_gid_write(ZTOI(zp), z_gid);
ZFS_TIME_DECODE(&ZTOI(zp)->i_atime, atime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_mtime, mtime);
ZFS_TIME_DECODE(&ZTOI(zp)->i_ctime, ctime);
ZFS_TIME_DECODE(&zp->z_btime, btime);
if ((uint32_t)gen != ZTOI(zp)->i_generation) {
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (SET_ERROR(EIO));
}
set_nlink(ZTOI(zp), (uint32_t)links);
zfs_set_inode_flags(zp, ZTOI(zp));
zp->z_blksz = doi.doi_data_block_size;
zp->z_atime_dirty = B_FALSE;
zfs_znode_update_vfs(zp);
/*
* If the file has zero links, then it has been unlinked on the send
* side and it must be in the received unlinked set.
* We call zfs_znode_dmu_fini() now to prevent any accesses to the
* stale data and to prevent automatic removal of the file in
* zfs_zinactive(). The file will be removed either when it is removed
* on the send side and the next incremental stream is received or
* when the unlinked set gets processed.
*/
zp->z_unlinked = (ZTOI(zp)->i_nlink == 0);
if (zp->z_unlinked)
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
return (0);
}
void
zfs_znode_delete(znode_t *zp, dmu_tx_t *tx)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
objset_t *os = zfsvfs->z_os;
uint64_t obj = zp->z_id;
uint64_t acl_obj = zfs_external_acl(zp);
znode_hold_t *zh;
zh = zfs_znode_hold_enter(zfsvfs, obj);
if (acl_obj) {
VERIFY(!zp->z_is_sa);
VERIFY(0 == dmu_object_free(os, acl_obj, tx));
}
VERIFY(0 == dmu_object_free(os, obj, tx));
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
void
zfs_zinactive(znode_t *zp)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
uint64_t z_id = zp->z_id;
znode_hold_t *zh;
ASSERT(zp->z_sa_hdl);
/*
* Don't allow a zfs_zget() while we're trying to release this znode.
*/
zh = zfs_znode_hold_enter(zfsvfs, z_id);
mutex_enter(&zp->z_lock);
/*
* If this was the last reference to a file with no links, remove
* the file from the file system unless the file system is mounted
* read-only. That can happen, for example, if the file system was
* originally read-write, the file was opened, then unlinked and
* the file system was made read-only before the file was finally
* closed. The file will remain in the unlinked set.
*/
if (zp->z_unlinked) {
ASSERT(!zfsvfs->z_issnap);
if (!zfs_is_readonly(zfsvfs) && !zfs_unlink_suspend_progress) {
mutex_exit(&zp->z_lock);
zfs_znode_hold_exit(zfsvfs, zh);
zfs_rmnode(zp);
return;
}
}
mutex_exit(&zp->z_lock);
zfs_znode_dmu_fini(zp);
zfs_znode_hold_exit(zfsvfs, zh);
}
#if defined(HAVE_INODE_TIMESPEC64_TIMES)
#define zfs_compare_timespec timespec64_compare
#else
#define zfs_compare_timespec timespec_compare
#endif
/*
* Determine whether the znode's atime must be updated. The logic mostly
* duplicates the Linux kernel's relatime_need_update() functionality.
* This function is only called if the underlying filesystem actually has
* atime updates enabled.
*/
boolean_t
zfs_relatime_need_update(const struct inode *ip)
{
inode_timespec_t now;
gethrestime(&now);
/*
* In relatime mode, only update the atime if the previous atime
* is earlier than either the ctime or mtime or if at least a day
* has passed since the last update of atime.
*/
if (zfs_compare_timespec(&ip->i_mtime, &ip->i_atime) >= 0)
return (B_TRUE);
if (zfs_compare_timespec(&ip->i_ctime, &ip->i_atime) >= 0)
return (B_TRUE);
if ((hrtime_t)now.tv_sec - (hrtime_t)ip->i_atime.tv_sec >= 24*60*60)
return (B_TRUE);
return (B_FALSE);
}
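/*
 * Worked example (illustrative): with atime == 1000s and mtime == 1500s,
 * the mtime comparison above returns >= 0 and the atime is updated.
 * With atime == 2000s and mtime == ctime == 1500s, neither comparison
 * fires, so the atime is refreshed only once "now" reaches at least
 * 2000s + 24*60*60, i.e. roughly a day after the last atime update.
 */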
/*
* Prepare to update znode time stamps.
*
* IN: zp - znode requiring timestamp update
* flag - ATTR_MTIME, ATTR_CTIME flags
*
* OUT: zp - z_seq
* mtime - new mtime
* ctime - new ctime
*
* Note: We don't update atime here, because we rely on Linux VFS to do
* atime updating.
*/
void
zfs_tstamp_update_setup(znode_t *zp, uint_t flag, uint64_t mtime[2],
uint64_t ctime[2])
{
inode_timespec_t now;
gethrestime(&now);
zp->z_seq++;
if (flag & ATTR_MTIME) {
ZFS_TIME_ENCODE(&now, mtime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_mtime), mtime);
if (ZTOZSB(zp)->z_use_fuids) {
zp->z_pflags |= (ZFS_ARCHIVE |
ZFS_AV_MODIFIED);
}
}
if (flag & ATTR_CTIME) {
ZFS_TIME_ENCODE(&now, ctime);
ZFS_TIME_DECODE(&(ZTOI(zp)->i_ctime), ctime);
if (ZTOZSB(zp)->z_use_fuids)
zp->z_pflags |= ZFS_ARCHIVE;
}
}
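/*
 * Typical usage sketch (mirrors zfs_freesp() below): the caller stages
 * the new timestamps into a bulk attribute array and pushes them out in
 * a single SA update, e.g.:
 *
 *   uint64_t mtime[2], ctime[2];
 *   SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
 *   SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
 *   zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
 *   (void) sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
 */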
/*
* Grow the block size for a file.
*
* IN: zp - znode of file whose block size is to be grown.
* size - requested block size
* tx - open transaction.
*
* NOTE: this function assumes that the znode is write locked.
*/
void
zfs_grow_blocksize(znode_t *zp, uint64_t size, dmu_tx_t *tx)
{
int error;
u_longlong_t dummy;
if (size <= zp->z_blksz)
return;
/*
* If the file size is already greater than the current blocksize,
* we will not grow. If there is more than one block in a file,
* the blocksize cannot change.
*/
if (zp->z_blksz && zp->z_size > zp->z_blksz)
return;
error = dmu_object_set_blocksize(ZTOZSB(zp)->z_os, zp->z_id,
size, 0, tx);
if (error == ENOTSUP)
return;
ASSERT0(error);
/* What blocksize did we actually get? */
dmu_object_size_from_db(sa_get_db(zp->z_sa_hdl), &zp->z_blksz, &dummy);
}
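/*
 * Example (illustrative): a single-block file with z_blksz == 4K and
 * z_size == 3K may grow its block size, e.g. to 16K, on a larger write.
 * Once a file spans more than one block (z_size > z_blksz), the block
 * size is fixed and this function returns without changing it.
 */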
/*
* Increase the file length
*
* IN: zp - znode of file to extend.
* end - new end-of-file
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_extend(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
uint64_t newblksz;
int error;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end <= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
if (end > zp->z_blksz &&
(!ISP2(zp->z_blksz) || zp->z_blksz < zfsvfs->z_max_blksz)) {
/*
* We are growing the file past the current block size.
*/
if (zp->z_blksz > ZTOZSB(zp)->z_max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
newblksz = MIN(end, 1 << highbit64(zp->z_blksz));
} else {
newblksz = MIN(end, ZTOZSB(zp)->z_max_blksz);
}
dmu_tx_hold_write(tx, zp->z_id, 0, newblksz);
} else {
newblksz = 0;
}
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
if (newblksz)
zfs_grow_blocksize(zp, newblksz, tx);
zp->z_size = end;
VERIFY(0 == sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(ZTOZSB(zp)),
&zp->z_size, sizeof (zp->z_size), tx));
zfs_rangelock_exit(lr);
dmu_tx_commit(tx);
return (0);
}
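/*
 * Example of the block size selection above (illustrative): extending a
 * single-block file with z_blksz == 4K to end == 300K on a dataset with
 * z_max_blksz == 128K yields newblksz == MIN(300K, 128K) == 128K, so the
 * transaction also holds a write covering the larger block. If z_blksz
 * is already a power of two at z_max_blksz, newblksz stays 0 and only
 * the SA update is required.
 */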
/*
* zfs_zero_partial_page - Modeled after update_pages() but
* with different arguments and semantics for use by zfs_freesp().
*
* Zeroes a piece of a single page cache entry for zp at offset
* start and length len.
*
* Caller must acquire a range lock on the file for the region
* being zeroed in order that the ARC and page cache stay in sync.
*/
static void
zfs_zero_partial_page(znode_t *zp, uint64_t start, uint64_t len)
{
struct address_space *mp = ZTOI(zp)->i_mapping;
struct page *pp;
int64_t off;
void *pb;
ASSERT((start & PAGE_MASK) == ((start + len - 1) & PAGE_MASK));
off = start & (PAGE_SIZE - 1);
start &= PAGE_MASK;
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
bzero(pb + off, len);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
}
/*
* Free space in a file.
*
* IN: zp - znode of file to free data in.
* off - start of section to free.
* len - length of section to free.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_free_range(znode_t *zp, uint64_t off, uint64_t len)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zfs_locked_range_t *lr;
int error;
/*
* Lock the range being freed.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, off, len, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (off >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
if (off + len > zp->z_size)
len = zp->z_size - off;
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, off, len);
/*
* Zero partial page cache entries. This must be done under a
* range lock in order to keep the ARC and page cache in sync.
*/
if (zp->z_is_mapped) {
loff_t first_page, last_page, page_len;
loff_t first_page_offset, last_page_offset;
/* first possible full page in hole */
first_page = (off + PAGE_SIZE - 1) >> PAGE_SHIFT;
/* last page of hole */
last_page = (off + len) >> PAGE_SHIFT;
/* offset of first_page */
first_page_offset = first_page << PAGE_SHIFT;
/* offset of last_page */
last_page_offset = last_page << PAGE_SHIFT;
/* truncate whole pages */
if (last_page_offset > first_page_offset) {
truncate_inode_pages_range(ZTOI(zp)->i_mapping,
first_page_offset, last_page_offset - 1);
}
/* truncate sub-page ranges */
if (first_page > last_page) {
/* entire punched area within a single page */
zfs_zero_partial_page(zp, off, len);
} else {
/* beginning of punched area at the end of a page */
page_len = first_page_offset - off;
if (page_len > 0)
zfs_zero_partial_page(zp, off, page_len);
/* end of punched area at the beginning of a page */
page_len = off + len - last_page_offset;
if (page_len > 0)
zfs_zero_partial_page(zp, last_page_offset,
page_len);
}
}
zfs_rangelock_exit(lr);
return (error);
}
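/*
 * Worked example of the page-cache arithmetic above (illustrative,
 * assuming PAGE_SIZE == 4096): punching off == 1000, len == 10000 gives
 * first_page == 1, last_page == 2, first_page_offset == 4096 and
 * last_page_offset == 8192. Whole pages [4096, 8191] are truncated,
 * bytes [1000, 4095] are zeroed at the tail of page 0, and bytes
 * [8192, 10999] are zeroed at the head of page 2.
 */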
/*
* Truncate a file
*
* IN: zp - znode of file to free data in.
* end - new end-of-file.
*
* RETURN: 0 on success, error code on failure
*/
static int
zfs_trunc(znode_t *zp, uint64_t end)
{
zfsvfs_t *zfsvfs = ZTOZSB(zp);
dmu_tx_t *tx;
zfs_locked_range_t *lr;
int error;
sa_bulk_attr_t bulk[2];
int count = 0;
/*
* We will change zp_size, lock the whole file.
*/
lr = zfs_rangelock_enter(&zp->z_rangelock, 0, UINT64_MAX, RL_WRITER);
/*
* Nothing to do if file already at desired length.
*/
if (end >= zp->z_size) {
zfs_rangelock_exit(lr);
return (0);
}
error = dmu_free_long_range(zfsvfs->z_os, zp->z_id, end,
DMU_OBJECT_END);
if (error) {
zfs_rangelock_exit(lr);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
zfs_rangelock_exit(lr);
return (error);
}
zp->z_size = end;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
NULL, &zp->z_size, sizeof (zp->z_size));
if (end == 0) {
zp->z_pflags &= ~ZFS_SPARSE;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
}
VERIFY(sa_bulk_update(zp->z_sa_hdl, bulk, count, tx) == 0);
dmu_tx_commit(tx);
zfs_rangelock_exit(lr);
return (0);
}
/*
* Free space in a file
*
* IN: zp - znode of file to free data in.
* off - start of range
* len - end of range (0 => EOF)
* flag - current file open mode flags.
* log - TRUE if this action should be logged
*
* RETURN: 0 on success, error code on failure
*/
int
zfs_freesp(znode_t *zp, uint64_t off, uint64_t len, int flag, boolean_t log)
{
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog = zfsvfs->z_log;
uint64_t mode;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int count = 0;
int error;
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs), &mode,
sizeof (mode))) != 0)
return (error);
if (off > zp->z_size) {
error = zfs_extend(zp, off+len);
if (error == 0 && log)
goto log;
goto out;
}
if (len == 0) {
error = zfs_trunc(zp, off);
} else {
if ((error = zfs_free_range(zp, off, len)) == 0 &&
off + len > zp->z_size)
error = zfs_extend(zp, off+len);
}
if (error || !log)
goto out;
log:
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
NULL, &zp->z_pflags, 8);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
ASSERT(error == 0);
zfs_log_truncate(zilog, tx, TX_TRUNCATE, zp, off, len);
dmu_tx_commit(tx);
zfs_znode_update_vfs(zp);
error = 0;
out:
/*
* Truncate the page cache - for file truncate operations, use
* the purpose-built API for truncations. For punching operations,
* the truncation is handled under a range lock in zfs_free_range.
*/
if (len == 0)
truncate_setsize(ZTOI(zp), off);
return (error);
}
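/*
 * Usage sketch (illustrative): because len == 0 means "to end of file",
 * a truncate (or extend) to a new size and a hole punch look like:
 *
 *   error = zfs_freesp(zp, new_size, 0, flag, B_TRUE);   truncate/extend
 *   error = zfs_freesp(zp, off, len, flag, B_TRUE);      punch a hole
 *
 * zpl_xattr_set_dir() in zpl_xattr.c uses the first form with off == 0
 * to empty an xattr file before rewriting its value.
 */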
void
zfs_create_fs(objset_t *os, cred_t *cr, nvlist_t *zplprops, dmu_tx_t *tx)
{
struct super_block *sb;
zfsvfs_t *zfsvfs;
uint64_t moid, obj, sa_obj, version;
uint64_t sense = ZFS_CASE_SENSITIVE;
uint64_t norm = 0;
nvpair_t *elem;
int size;
int error;
int i;
znode_t *rootzp = NULL;
vattr_t vattr;
znode_t *zp;
zfs_acl_ids_t acl_ids;
/*
* First attempt to create master node.
*/
/*
* In an empty objset, there are no blocks to read and thus
* there can be no i/o errors (which we assert below).
*/
moid = MASTER_NODE_OBJ;
error = zap_create_claim(os, moid, DMU_OT_MASTER_NODE,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
/*
* Set starting attributes.
*/
version = zfs_zpl_version_map(spa_version(dmu_objset_spa(os)));
elem = NULL;
while ((elem = nvlist_next_nvpair(zplprops, elem)) != NULL) {
/* For the moment we expect all zpl props to be uint64_ts */
uint64_t val;
char *name;
ASSERT(nvpair_type(elem) == DATA_TYPE_UINT64);
VERIFY(nvpair_value_uint64(elem, &val) == 0);
name = nvpair_name(elem);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_VERSION)) == 0) {
if (val < version)
version = val;
} else {
error = zap_update(os, moid, name, 8, 1, &val, tx);
}
ASSERT(error == 0);
if (strcmp(name, zfs_prop_to_name(ZFS_PROP_NORMALIZE)) == 0)
norm = val;
else if (strcmp(name, zfs_prop_to_name(ZFS_PROP_CASE)) == 0)
sense = val;
}
ASSERT(version != 0);
error = zap_update(os, moid, ZPL_VERSION_STR, 8, 1, &version, tx);
/*
* Create zap object used for SA attribute registration
*/
if (version >= ZPL_VERSION_SA) {
sa_obj = zap_create(os, DMU_OT_SA_MASTER_NODE,
DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_SA_ATTRS, 8, 1, &sa_obj, tx);
ASSERT(error == 0);
} else {
sa_obj = 0;
}
/*
* Create a delete queue.
*/
obj = zap_create(os, DMU_OT_UNLINKED_SET, DMU_OT_NONE, 0, tx);
error = zap_add(os, moid, ZFS_UNLINKED_SET, 8, 1, &obj, tx);
ASSERT(error == 0);
/*
* Create root znode. Create minimal znode/inode/zfsvfs/sb
* to allow zfs_mknode to work.
*/
vattr.va_mask = ATTR_MODE|ATTR_UID|ATTR_GID;
vattr.va_mode = S_IFDIR|0755;
vattr.va_uid = crgetuid(cr);
vattr.va_gid = crgetgid(cr);
rootzp = kmem_cache_alloc(znode_cache, KM_SLEEP);
rootzp->z_unlinked = B_FALSE;
rootzp->z_atime_dirty = B_FALSE;
rootzp->z_is_sa = USE_SA(version, os);
rootzp->z_pflags = 0;
zfsvfs = kmem_zalloc(sizeof (zfsvfs_t), KM_SLEEP);
zfsvfs->z_os = os;
zfsvfs->z_parent = zfsvfs;
zfsvfs->z_version = version;
zfsvfs->z_use_fuids = USE_FUIDS(version, os);
zfsvfs->z_use_sa = USE_SA(version, os);
zfsvfs->z_norm = norm;
sb = kmem_zalloc(sizeof (struct super_block), KM_SLEEP);
sb->s_fs_info = zfsvfs;
ZTOI(rootzp)->i_sb = sb;
error = sa_setup(os, sa_obj, zfs_attr_table, ZPL_END,
&zfsvfs->z_attr_table);
ASSERT(error == 0);
/*
* Fold case on file systems that are always or sometimes case
* insensitive.
*/
if (sense == ZFS_CASE_INSENSITIVE || sense == ZFS_CASE_MIXED)
zfsvfs->z_norm |= U8_TEXTPREP_TOUPPER;
mutex_init(&zfsvfs->z_znodes_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zfsvfs->z_all_znodes, sizeof (znode_t),
offsetof(znode_t, z_link_node));
size = MIN(1 << (highbit64(zfs_object_mutex_size)-1), ZFS_OBJ_MTX_MAX);
zfsvfs->z_hold_size = size;
zfsvfs->z_hold_trees = vmem_zalloc(sizeof (avl_tree_t) * size,
KM_SLEEP);
zfsvfs->z_hold_locks = vmem_zalloc(sizeof (kmutex_t) * size, KM_SLEEP);
for (i = 0; i != size; i++) {
avl_create(&zfsvfs->z_hold_trees[i], zfs_znode_hold_compare,
sizeof (znode_hold_t), offsetof(znode_hold_t, zh_node));
mutex_init(&zfsvfs->z_hold_locks[i], NULL, MUTEX_DEFAULT, NULL);
}
VERIFY(0 == zfs_acl_ids_create(rootzp, IS_ROOT_NODE, &vattr,
cr, NULL, &acl_ids));
zfs_mknode(rootzp, &vattr, tx, cr, IS_ROOT_NODE, &zp, &acl_ids);
ASSERT3P(zp, ==, rootzp);
error = zap_add(os, moid, ZFS_ROOT_OBJ, 8, 1, &rootzp->z_id, tx);
ASSERT(error == 0);
zfs_acl_ids_free(&acl_ids);
atomic_set(&ZTOI(rootzp)->i_count, 0);
sa_handle_destroy(rootzp->z_sa_hdl);
kmem_cache_free(znode_cache, rootzp);
for (i = 0; i != size; i++) {
avl_destroy(&zfsvfs->z_hold_trees[i]);
mutex_destroy(&zfsvfs->z_hold_locks[i]);
}
mutex_destroy(&zfsvfs->z_znodes_lock);
vmem_free(zfsvfs->z_hold_trees, sizeof (avl_tree_t) * size);
vmem_free(zfsvfs->z_hold_locks, sizeof (kmutex_t) * size);
kmem_free(sb, sizeof (struct super_block));
kmem_free(zfsvfs, sizeof (zfsvfs_t));
}
#endif /* _KERNEL */
static int
zfs_sa_setup(objset_t *osp, sa_attr_type_t **sa_table)
{
uint64_t sa_obj = 0;
int error;
error = zap_lookup(osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS, 8, 1, &sa_obj);
if (error != 0 && error != ENOENT)
return (error);
error = sa_setup(osp, sa_obj, zfs_attr_table, ZPL_END, sa_table);
return (error);
}
static int
zfs_grab_sa_handle(objset_t *osp, uint64_t obj, sa_handle_t **hdlp,
dmu_buf_t **db, void *tag)
{
dmu_object_info_t doi;
int error;
if ((error = sa_buf_hold(osp, obj, tag, db)) != 0)
return (error);
dmu_object_info_from_db(*db, &doi);
if ((doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) ||
(doi.doi_bonus_type == DMU_OT_ZNODE &&
doi.doi_bonus_size < sizeof (znode_phys_t))) {
sa_buf_rele(*db, tag);
return (SET_ERROR(ENOTSUP));
}
error = sa_handle_get(osp, obj, NULL, SA_HDL_PRIVATE, hdlp);
if (error != 0) {
sa_buf_rele(*db, tag);
return (error);
}
return (0);
}
static void
zfs_release_sa_handle(sa_handle_t *hdl, dmu_buf_t *db, void *tag)
{
sa_handle_destroy(hdl);
sa_buf_rele(db, tag);
}
/*
* Given an object number, return its parent object number and whether
* or not the object is an extended attribute directory.
*/
static int
zfs_obj_to_pobj(objset_t *osp, sa_handle_t *hdl, sa_attr_type_t *sa_table,
uint64_t *pobjp, int *is_xattrdir)
{
uint64_t parent;
uint64_t pflags;
uint64_t mode;
uint64_t parent_mode;
sa_bulk_attr_t bulk[3];
sa_handle_t *sa_hdl;
dmu_buf_t *sa_db;
int count = 0;
int error;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_PARENT], NULL,
&parent, sizeof (parent));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_FLAGS], NULL,
&pflags, sizeof (pflags));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&mode, sizeof (mode));
if ((error = sa_bulk_lookup(hdl, bulk, count)) != 0)
return (error);
/*
* When a link is removed its parent pointer is not changed and will
* be invalid. There are two cases where a link is removed but the
* file stays around: when it goes to the delete queue and when there
* are additional links.
*/
error = zfs_grab_sa_handle(osp, parent, &sa_hdl, &sa_db, FTAG);
if (error != 0)
return (error);
error = sa_lookup(sa_hdl, ZPL_MODE, &parent_mode, sizeof (parent_mode));
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
if (error != 0)
return (error);
*is_xattrdir = ((pflags & ZFS_XATTR) != 0) && S_ISDIR(mode);
/*
* Extended attributes can be applied to files, directories, etc., so an
* xattr directory's parent may be any type of object. Otherwise the
* parent must be a directory.
*/
if (!*is_xattrdir && !S_ISDIR(parent_mode))
return (SET_ERROR(EINVAL));
*pobjp = parent;
return (0);
}
/*
* Given an object number, return some zpl level statistics
*/
static int
zfs_obj_to_stats_impl(sa_handle_t *hdl, sa_attr_type_t *sa_table,
zfs_stat_t *sb)
{
sa_bulk_attr_t bulk[4];
int count = 0;
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_MODE], NULL,
&sb->zs_mode, sizeof (sb->zs_mode));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_GEN], NULL,
&sb->zs_gen, sizeof (sb->zs_gen));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_LINKS], NULL,
&sb->zs_links, sizeof (sb->zs_links));
SA_ADD_BULK_ATTR(bulk, count, sa_table[ZPL_CTIME], NULL,
&sb->zs_ctime, sizeof (sb->zs_ctime));
return (sa_bulk_lookup(hdl, bulk, count));
}
static int
zfs_obj_to_path_impl(objset_t *osp, uint64_t obj, sa_handle_t *hdl,
sa_attr_type_t *sa_table, char *buf, int len)
{
sa_handle_t *sa_hdl;
sa_handle_t *prevhdl = NULL;
dmu_buf_t *prevdb = NULL;
dmu_buf_t *sa_db = NULL;
char *path = buf + len - 1;
int error;
*path = '\0';
sa_hdl = hdl;
uint64_t deleteq_obj;
VERIFY0(zap_lookup(osp, MASTER_NODE_OBJ,
ZFS_UNLINKED_SET, sizeof (uint64_t), 1, &deleteq_obj));
error = zap_lookup_int(osp, deleteq_obj, obj);
if (error == 0) {
return (ESTALE);
} else if (error != ENOENT) {
return (error);
}
error = 0;
for (;;) {
uint64_t pobj = 0;
char component[MAXNAMELEN + 2];
size_t complen;
int is_xattrdir = 0;
if (prevdb) {
ASSERT(prevhdl != NULL);
zfs_release_sa_handle(prevhdl, prevdb, FTAG);
}
if ((error = zfs_obj_to_pobj(osp, sa_hdl, sa_table, &pobj,
&is_xattrdir)) != 0)
break;
if (pobj == obj) {
if (path[0] != '/')
*--path = '/';
break;
}
component[0] = '/';
if (is_xattrdir) {
(void) sprintf(component + 1, "<xattrdir>");
} else {
error = zap_value_search(osp, pobj, obj,
ZFS_DIRENT_OBJ(-1ULL), component + 1);
if (error != 0)
break;
}
complen = strlen(component);
path -= complen;
ASSERT(path >= buf);
bcopy(component, path, complen);
obj = pobj;
if (sa_hdl != hdl) {
prevhdl = sa_hdl;
prevdb = sa_db;
}
error = zfs_grab_sa_handle(osp, obj, &sa_hdl, &sa_db, FTAG);
if (error != 0) {
sa_hdl = prevhdl;
sa_db = prevdb;
break;
}
}
if (sa_hdl != NULL && sa_hdl != hdl) {
ASSERT(sa_db != NULL);
zfs_release_sa_handle(sa_hdl, sa_db, FTAG);
}
if (error == 0)
(void) memmove(buf, path, buf + len - path);
return (error);
}
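/*
 * Illustrative note: the path is assembled right to left, starting at
 * buf + len - 1 and walking parent pointers, so for an object three
 * levels deep the buffer fills in as "/baz", then "/bar/baz", then
 * "/foo/bar/baz" before the final memmove() shifts the result to the
 * front of buf.
 */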
int
zfs_obj_to_path(objset_t *osp, uint64_t obj, char *buf, int len)
{
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
int
zfs_obj_to_stats(objset_t *osp, uint64_t obj, zfs_stat_t *sb,
char *buf, int len)
{
char *path = buf + len - 1;
sa_attr_type_t *sa_table;
sa_handle_t *hdl;
dmu_buf_t *db;
int error;
*path = '\0';
error = zfs_sa_setup(osp, &sa_table);
if (error != 0)
return (error);
error = zfs_grab_sa_handle(osp, obj, &hdl, &db, FTAG);
if (error != 0)
return (error);
error = zfs_obj_to_stats_impl(hdl, sa_table, sb);
if (error != 0) {
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
error = zfs_obj_to_path_impl(osp, obj, hdl, sa_table, buf, len);
zfs_release_sa_handle(hdl, db, FTAG);
return (error);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(zfs_create_fs);
EXPORT_SYMBOL(zfs_obj_to_path);
/* CSTYLED */
module_param(zfs_object_mutex_size, uint, 0644);
MODULE_PARM_DESC(zfs_object_mutex_size, "Size of znode hold array");
module_param(zfs_unlink_suspend_progress, int, 0644);
MODULE_PARM_DESC(zfs_unlink_suspend_progress, "Set to prevent async unlinks "
"(debug - leaks space into the unlinked set)");
#endif
diff --git a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
index 66f197e4c77a..e7726e8458af 100644
--- a/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
+++ b/sys/contrib/openzfs/module/os/linux/zfs/zpl_xattr.c
@@ -1,1497 +1,1515 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, Lawrence Livermore National Security, LLC.
*
* Extended attributes (xattr) on Solaris are implemented as files
* which exist in a hidden xattr directory. These extended attributes
* can be accessed using the attropen() system call which opens
* the extended attribute. It can then be manipulated just like
* a standard file descriptor. This has a couple advantages such
* as practically no size limit on the file, and the extended
* attributes permissions may differ from those of the parent file.
* This interface is really quite clever, but it's also completely
* different from what is supported on Linux. It also comes with a
* steep performance penalty when accessing small xattrs because they
* are not stored with the parent file.
*
* Under Linux extended attributes are manipulated by the system
* calls getxattr(2), setxattr(2), and listxattr(2). They consider
* extended attributes to be name/value pairs where the name is a
* NULL terminated string. The name must also include one of the
* following namespace prefixes:
*
* user - No restrictions and is available to user applications.
* trusted - Restricted to kernel and root (CAP_SYS_ADMIN) use.
* system - Used for access control lists (system.nfs4_acl, etc).
* security - Used by SELinux to store a file's security context.
*
* The value under Linux is limited to 65536 bytes of binary data.
* In practice, individual xattrs tend to be much smaller than this
* and are typically less than 100 bytes. A good example of this
* is the security.selinux xattr, which is less than 100 bytes and
* exists for every file when xattr labeling is enabled.
*
* The Linux xattr implementation has been written to take advantage of
* this typical usage. When the dataset property 'xattr=sa' is set,
* then xattrs will be preferentially stored as System Attributes (SA).
* This allows tiny xattrs (~100 bytes) to be stored with the dnode and
* up to 64k of xattrs to be stored in the spill block. If additional
* xattr space is required, which is unlikely under Linux, they will
* be stored using the traditional directory approach.
*
* This optimization results in roughly a 3x performance improvement
* when accessing xattrs because it avoids the need to perform a seek
* for every xattr value. When multiple xattrs are stored per-file
* the performance improvements are even greater because all of the
* xattrs stored in the spill block will be cached.
*
* However, by default SA based xattrs are disabled in the Linux port
* to maximize compatibility with other implementations. If you do
* enable SA based xattrs then they will not be visible on platforms
* which do not support this feature.
*
* NOTE: One additional consequence of the xattr directory implementation
* is that when an extended attribute is manipulated an inode is created.
* This inode will exist in the Linux inode cache but there will be no
* associated entry in the dentry cache which references it. This is
* safe but it may result in some confusion. Enabling SA based xattrs
* largely avoids the issue except in the overflow case.
*/
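/*
 * Illustrative example of the Linux interface described above: a user
 * xattr set from user space with setxattr(2), for example
 *
 *   setxattr("/tank/file", "user.mime_type", "text/plain", 10, 0);
 *
 * reaches this file via the "user" namespace handler and, with the
 * dataset property xattr=sa, would typically be stored as a System
 * Attribute rather than in an xattr directory.
 */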
#include <sys/zfs_znode.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_vnops.h>
#include <sys/zap.h>
#include <sys/vfs.h>
#include <sys/zpl.h>
typedef struct xattr_filldir {
size_t size;
size_t offset;
char *buf;
struct dentry *dentry;
} xattr_filldir_t;
static const struct xattr_handler *zpl_xattr_handler(const char *);
static int
zpl_xattr_permission(xattr_filldir_t *xf, const char *name, int name_len)
{
static const struct xattr_handler *handler;
struct dentry *d = xf->dentry;
handler = zpl_xattr_handler(name);
if (!handler)
return (0);
if (handler->list) {
#if defined(HAVE_XATTR_LIST_SIMPLE)
if (!handler->list(d))
return (0);
#elif defined(HAVE_XATTR_LIST_DENTRY)
if (!handler->list(d, NULL, 0, name, name_len, 0))
return (0);
#elif defined(HAVE_XATTR_LIST_HANDLER)
if (!handler->list(handler, d, NULL, 0, name, name_len))
return (0);
#endif
}
return (1);
}
/*
* Determine if a given xattr name should be visible and, if so, copy it
* into the provided buffer (xf->buf).
*/
static int
zpl_xattr_filldir(xattr_filldir_t *xf, const char *name, int name_len)
{
/* Check permissions using the per-namespace list xattr handler. */
if (!zpl_xattr_permission(xf, name, name_len))
return (0);
/* When xf->buf is NULL only calculate the required size. */
if (xf->buf) {
if (xf->offset + name_len + 1 > xf->size)
return (-ERANGE);
memcpy(xf->buf + xf->offset, name, name_len);
xf->buf[xf->offset + name_len] = '\0';
}
xf->offset += (name_len + 1);
return (0);
}
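/*
 * Note (illustrative): the NULL-buffer mode above is what lets
 * listxattr(2) be used in the usual two-call pattern, first with size 0
 * to learn the required buffer length and then again with an allocated
 * buffer of that size.
 */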
/*
* Read as many directory entry names as will fit in to the provided buffer,
* or when no buffer is provided calculate the required buffer size.
*/
static int
zpl_xattr_readdir(struct inode *dxip, xattr_filldir_t *xf)
{
zap_cursor_t zc;
zap_attribute_t zap;
int error;
zap_cursor_init(&zc, ITOZSB(dxip)->z_os, ITOZ(dxip)->z_id);
while ((error = -zap_cursor_retrieve(&zc, &zap)) == 0) {
if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
error = -ENXIO;
break;
}
error = zpl_xattr_filldir(xf, zap.za_name, strlen(zap.za_name));
if (error)
break;
zap_cursor_advance(&zc);
}
zap_cursor_fini(&zc);
if (error == -ENOENT)
error = 0;
return (error);
}
static ssize_t
zpl_xattr_list_dir(xattr_filldir_t *xf, cred_t *cr)
{
struct inode *ip = xf->dentry->d_inode;
struct inode *dxip = NULL;
znode_t *dxzp;
int error;
/* Lookup the xattr directory */
error = -zfs_lookup(ITOZ(ip), NULL, &dxzp, LOOKUP_XATTR,
cr, NULL, NULL);
if (error) {
if (error == -ENOENT)
error = 0;
return (error);
}
dxip = ZTOI(dxzp);
error = zpl_xattr_readdir(dxip, xf);
iput(dxip);
return (error);
}
static ssize_t
zpl_xattr_list_sa(xattr_filldir_t *xf)
{
znode_t *zp = ITOZ(xf->dentry->d_inode);
nvpair_t *nvp = NULL;
int error = 0;
mutex_enter(&zp->z_lock);
if (zp->z_xattr_cached == NULL)
error = -zfs_sa_get_xattr(zp);
mutex_exit(&zp->z_lock);
if (error)
return (error);
ASSERT(zp->z_xattr_cached);
while ((nvp = nvlist_next_nvpair(zp->z_xattr_cached, nvp)) != NULL) {
ASSERT3U(nvpair_type(nvp), ==, DATA_TYPE_BYTE_ARRAY);
error = zpl_xattr_filldir(xf, nvpair_name(nvp),
strlen(nvpair_name(nvp)));
if (error)
return (error);
}
return (0);
}
ssize_t
zpl_xattr_list(struct dentry *dentry, char *buffer, size_t buffer_size)
{
znode_t *zp = ITOZ(dentry->d_inode);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
xattr_filldir_t xf = { buffer_size, 0, buffer, dentry };
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int error = 0;
crhold(cr);
cookie = spl_fstrans_mark();
ZPL_ENTER(zfsvfs);
ZPL_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_READER);
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_list_sa(&xf);
if (error)
goto out;
}
error = zpl_xattr_list_dir(&xf, cr);
if (error)
goto out;
error = xf.offset;
out:
rw_exit(&zp->z_xattr_lock);
ZPL_EXIT(zfsvfs);
spl_fstrans_unmark(cookie);
crfree(cr);
return (error);
}
static int
zpl_xattr_get_dir(struct inode *ip, const char *name, void *value,
size_t size, cred_t *cr)
{
fstrans_cookie_t cookie;
struct inode *xip = NULL;
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
int error;
/* Lookup the xattr directory */
error = -zfs_lookup(ITOZ(ip), NULL, &dxzp, LOOKUP_XATTR,
cr, NULL, NULL);
if (error)
goto out;
/* Lookup a specific xattr name in the directory */
error = -zfs_lookup(dxzp, (char *)name, &xzp, 0, cr, NULL, NULL);
if (error)
goto out;
xip = ZTOI(xzp);
if (!size) {
error = i_size_read(xip);
goto out;
}
if (size < i_size_read(xip)) {
error = -ERANGE;
goto out;
}
struct iovec iov;
iov.iov_base = (void *)value;
iov.iov_len = size;
zfs_uio_t uio;
zfs_uio_iovec_init(&uio, &iov, 1, 0, UIO_SYSSPACE, size, 0);
cookie = spl_fstrans_mark();
error = -zfs_read(ITOZ(xip), &uio, 0, cr);
spl_fstrans_unmark(cookie);
if (error == 0)
error = size - zfs_uio_resid(&uio);
out:
if (xzp)
zrele(xzp);
if (dxzp)
zrele(dxzp);
return (error);
}
static int
zpl_xattr_get_sa(struct inode *ip, const char *name, void *value, size_t size)
{
znode_t *zp = ITOZ(ip);
uchar_t *nv_value;
uint_t nv_size;
int error = 0;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
mutex_enter(&zp->z_lock);
if (zp->z_xattr_cached == NULL)
error = -zfs_sa_get_xattr(zp);
mutex_exit(&zp->z_lock);
if (error)
return (error);
ASSERT(zp->z_xattr_cached);
error = -nvlist_lookup_byte_array(zp->z_xattr_cached, name,
&nv_value, &nv_size);
if (error)
return (error);
if (size == 0 || value == NULL)
return (nv_size);
if (size < nv_size)
return (-ERANGE);
memcpy(value, nv_value, nv_size);
return (nv_size);
}
static int
__zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size,
cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_get_sa(ip, name, value, size);
if (error != -ENOENT)
goto out;
}
error = zpl_xattr_get_dir(ip, name, value, size, cr);
out:
if (error == -ENOENT)
error = -ENODATA;
return (error);
}
#define XATTR_NOENT 0x0
#define XATTR_IN_SA 0x1
#define XATTR_IN_DIR 0x2
/* check where the xattr resides */
static int
__zpl_xattr_where(struct inode *ip, const char *name, int *where, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
int error;
ASSERT(where);
ASSERT(RW_LOCK_HELD(&zp->z_xattr_lock));
*where = XATTR_NOENT;
if (zfsvfs->z_use_sa && zp->z_is_sa) {
error = zpl_xattr_get_sa(ip, name, NULL, 0);
if (error >= 0)
*where |= XATTR_IN_SA;
else if (error != -ENOENT)
return (error);
}
error = zpl_xattr_get_dir(ip, name, NULL, 0, cr);
if (error >= 0)
*where |= XATTR_IN_DIR;
else if (error != -ENOENT)
return (error);
if (*where == (XATTR_IN_SA|XATTR_IN_DIR))
cmn_err(CE_WARN, "ZFS: inode %p has xattr \"%s\""
" in both SA and dir", ip, name);
if (*where == XATTR_NOENT)
error = -ENODATA;
else
error = 0;
return (error);
}
static int
zpl_xattr_get(struct inode *ip, const char *name, void *value, size_t size)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
ZPL_ENTER(zfsvfs);
ZPL_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_READER);
error = __zpl_xattr_get(ip, name, value, size, cr);
rw_exit(&zp->z_xattr_lock);
ZPL_EXIT(zfsvfs);
spl_fstrans_unmark(cookie);
crfree(cr);
return (error);
}
static int
zpl_xattr_set_dir(struct inode *ip, const char *name, const void *value,
size_t size, int flags, cred_t *cr)
{
znode_t *dxzp = NULL;
znode_t *xzp = NULL;
vattr_t *vap = NULL;
int lookup_flags, error;
const int xattr_mode = S_IFREG | 0644;
loff_t pos = 0;
/*
* Lookup the xattr directory. When we're adding an entry pass
* CREATE_XATTR_DIR to ensure the xattr directory is created.
* When removing an entry this flag is not passed to avoid
* unnecessarily creating a new xattr directory.
*/
lookup_flags = LOOKUP_XATTR;
if (value != NULL)
lookup_flags |= CREATE_XATTR_DIR;
error = -zfs_lookup(ITOZ(ip), NULL, &dxzp, lookup_flags,
cr, NULL, NULL);
if (error)
goto out;
/* Lookup a specific xattr name in the directory */
error = -zfs_lookup(dxzp, (char *)name, &xzp, 0, cr, NULL, NULL);
if (error && (error != -ENOENT))
goto out;
error = 0;
/* Remove a specific name xattr when value is set to NULL. */
if (value == NULL) {
if (xzp)
error = -zfs_remove(dxzp, (char *)name, cr, 0);
goto out;
}
/* Lookup failed, create a new xattr. */
if (xzp == NULL) {
vap = kmem_zalloc(sizeof (vattr_t), KM_SLEEP);
vap->va_mode = xattr_mode;
vap->va_mask = ATTR_MODE;
vap->va_uid = crgetfsuid(cr);
vap->va_gid = crgetfsgid(cr);
error = -zfs_create(dxzp, (char *)name, vap, 0, 0644, &xzp,
cr, 0, NULL);
if (error)
goto out;
}
ASSERT(xzp != NULL);
error = -zfs_freesp(xzp, 0, 0, xattr_mode, TRUE);
if (error)
goto out;
error = -zfs_write_simple(xzp, value, size, pos, NULL);
out:
if (error == 0) {
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
}
if (vap)
kmem_free(vap, sizeof (vattr_t));
if (xzp)
zrele(xzp);
if (dxzp)
zrele(dxzp);
if (error == -ENOENT)
error = -ENODATA;
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_xattr_set_sa(struct inode *ip, const char *name, const void *value,
size_t size, int flags, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
nvlist_t *nvl;
size_t sa_size;
int error = 0;
mutex_enter(&zp->z_lock);
if (zp->z_xattr_cached == NULL)
error = -zfs_sa_get_xattr(zp);
mutex_exit(&zp->z_lock);
if (error)
return (error);
ASSERT(zp->z_xattr_cached);
nvl = zp->z_xattr_cached;
if (value == NULL) {
error = -nvlist_remove(nvl, name, DATA_TYPE_BYTE_ARRAY);
if (error == -ENOENT)
error = zpl_xattr_set_dir(ip, name, NULL, 0, flags, cr);
} else {
/* Limited to 32k to keep nvpair memory allocations small */
if (size > DXATTR_MAX_ENTRY_SIZE)
return (-EFBIG);
/* Prevent the DXATTR SA from consuming the entire SA region */
error = -nvlist_size(nvl, &sa_size, NV_ENCODE_XDR);
if (error)
return (error);
if (sa_size > DXATTR_MAX_SA_SIZE)
return (-EFBIG);
error = -nvlist_add_byte_array(nvl, name,
(uchar_t *)value, size);
}
/*
* Update the SA for additions, modifications, and removals. On
* error drop the inconsistent cached version of the nvlist; it
* will be reconstructed from the ARC when next accessed.
*/
if (error == 0)
error = -zfs_sa_set_xattr(zp);
if (error) {
nvlist_free(nvl);
zp->z_xattr_cached = NULL;
}
ASSERT3S(error, <=, 0);
return (error);
}
static int
zpl_xattr_set(struct inode *ip, const char *name, const void *value,
size_t size, int flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ZTOZSB(zp);
cred_t *cr = CRED();
fstrans_cookie_t cookie;
int where;
int error;
crhold(cr);
cookie = spl_fstrans_mark();
ZPL_ENTER(zfsvfs);
ZPL_VERIFY_ZP(zp);
rw_enter(&zp->z_xattr_lock, RW_WRITER);
/*
* Before setting the xattr check to see if it already exists.
* This is done to ensure the following optional flags are honored.
*
* XATTR_CREATE: fail if xattr already exists
* XATTR_REPLACE: fail if xattr does not exist
*
* We also want to know if it resides in the SA or the dir, so we can
* make sure we don't end up with a duplicate in both places.
*/
error = __zpl_xattr_where(ip, name, &where, cr);
if (error < 0) {
if (error != -ENODATA)
goto out;
if (flags & XATTR_REPLACE)
goto out;
/* The xattr to be removed does not exist */
error = 0;
if (value == NULL)
goto out;
} else {
error = -EEXIST;
if (flags & XATTR_CREATE)
goto out;
}
/* Preferentially store the xattr as a SA for better performance */
if (zfsvfs->z_use_sa && zp->z_is_sa &&
(zfsvfs->z_xattr_sa || (value == NULL && where & XATTR_IN_SA))) {
error = zpl_xattr_set_sa(ip, name, value, size, flags, cr);
if (error == 0) {
/*
* Successfully put into SA, we need to clear the one
* in dir.
*/
if (where & XATTR_IN_DIR)
zpl_xattr_set_dir(ip, name, NULL, 0, 0, cr);
goto out;
}
}
error = zpl_xattr_set_dir(ip, name, value, size, flags, cr);
/*
* Successfully put into dir, we need to clear the one in SA.
*/
if (error == 0 && (where & XATTR_IN_SA))
zpl_xattr_set_sa(ip, name, NULL, 0, 0, cr);
out:
rw_exit(&zp->z_xattr_lock);
ZPL_EXIT(zfsvfs);
spl_fstrans_unmark(cookie);
crfree(cr);
ASSERT3S(error, <=, 0);
return (error);
}
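/*
 * Flag semantics example (illustrative): setting an existing name with
 * XATTR_CREATE fails with -EEXIST, and setting a missing name with
 * XATTR_REPLACE fails with -ENODATA, matching the setxattr(2) contract
 * described in the comment above.
 */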
/*
* Extended user attributes
*
* "Extended user attributes may be assigned to files and directories for
* storing arbitrary additional information such as the mime type,
* character set or encoding of a file. The access permissions for user
* attributes are defined by the file permission bits: read permission
* is required to retrieve the attribute value, and write permission is
* required to change it.
*
* The file permission bits of regular files and directories are
* interpreted differently from the file permission bits of special
* files and symbolic links. For regular files and directories the file
* permission bits define access to the file's contents, while for
* device special files they define access to the device described by
* the special file. The file permissions of symbolic links are not
* used in access checks. These differences would allow users to
* consume filesystem resources in a way not controllable by disk quotas
* for group or world writable special files and directories.
*
* For this reason, extended user attributes are allowed only for
* regular files and directories, and access to extended user attributes
* is restricted to the owner and to users with appropriate capabilities
* for directories with the sticky bit set (see the chmod(1) manual page
* for an explanation of the sticky bit)." - xattr(7)
*
* ZFS allows extended user attributes to be disabled administratively
* by setting the 'xattr=off' property on the dataset.
*/
static int
__zpl_xattr_user_list(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
return (ITOZSB(ip)->z_flags & ZSB_XATTR);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_user_list);
static int
__zpl_xattr_user_get(struct inode *ip, const char *name,
void *value, size_t size)
{
char *xattr_name;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
if (!(ITOZSB(ip)->z_flags & ZSB_XATTR))
return (-EOPNOTSUPP);
xattr_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name);
error = zpl_xattr_get(ip, xattr_name, value, size);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_user_get);
static int
__zpl_xattr_user_set(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
char *xattr_name;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
if (!(ITOZSB(ip)->z_flags & ZSB_XATTR))
return (-EOPNOTSUPP);
xattr_name = kmem_asprintf("%s%s", XATTR_USER_PREFIX, name);
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_user_set);
xattr_handler_t zpl_xattr_user_handler =
{
.prefix = XATTR_USER_PREFIX,
.list = zpl_xattr_user_list,
.get = zpl_xattr_user_get,
.set = zpl_xattr_user_set,
};
/*
* Trusted extended attributes
*
* "Trusted extended attributes are visible and accessible only to
* processes that have the CAP_SYS_ADMIN capability. Attributes in this
* class are used to implement mechanisms in user space (i.e., outside
* the kernel) which keep information in extended attributes to which
* ordinary processes should not have access." - xattr(7)
*/
static int
__zpl_xattr_trusted_list(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
return (capable(CAP_SYS_ADMIN));
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_trusted_list);
static int
__zpl_xattr_trusted_get(struct inode *ip, const char *name,
void *value, size_t size)
{
char *xattr_name;
int error;
if (!capable(CAP_SYS_ADMIN))
return (-EACCES);
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name);
error = zpl_xattr_get(ip, xattr_name, value, size);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_trusted_get);
static int
__zpl_xattr_trusted_set(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
char *xattr_name;
int error;
if (!capable(CAP_SYS_ADMIN))
return (-EACCES);
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_TRUSTED_PREFIX, name);
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_trusted_set);
xattr_handler_t zpl_xattr_trusted_handler =
{
.prefix = XATTR_TRUSTED_PREFIX,
.list = zpl_xattr_trusted_list,
.get = zpl_xattr_trusted_get,
.set = zpl_xattr_trusted_set,
};
/*
* Extended security attributes
*
* "The security attribute namespace is used by kernel security modules,
* such as Security Enhanced Linux, and also to implement file
* capabilities (see capabilities(7)). Read and write access
* permissions to security attributes depend on the policy implemented
* for each security attribute by the security module. When no security
* module is loaded, all processes have read access to extended security
* attributes, and write access is limited to processes that have the
* CAP_SYS_ADMIN capability." - xattr(7)
*/
static int
__zpl_xattr_security_list(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
return (1);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_security_list);
static int
__zpl_xattr_security_get(struct inode *ip, const char *name,
void *value, size_t size)
{
char *xattr_name;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_SECURITY_PREFIX, name);
error = zpl_xattr_get(ip, xattr_name, value, size);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_security_get);
static int
__zpl_xattr_security_set(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
char *xattr_name;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") == 0)
return (-EINVAL);
#endif
xattr_name = kmem_asprintf("%s%s", XATTR_SECURITY_PREFIX, name);
error = zpl_xattr_set(ip, xattr_name, value, size, flags);
kmem_strfree(xattr_name);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_security_set);
static int
zpl_xattr_security_init_impl(struct inode *ip, const struct xattr *xattrs,
void *fs_info)
{
const struct xattr *xattr;
int error = 0;
for (xattr = xattrs; xattr->name != NULL; xattr++) {
error = __zpl_xattr_security_set(ip,
xattr->name, xattr->value, xattr->value_len, 0);
if (error < 0)
break;
}
return (error);
}
int
zpl_xattr_security_init(struct inode *ip, struct inode *dip,
const struct qstr *qstr)
{
return security_inode_init_security(ip, dip, qstr,
&zpl_xattr_security_init_impl, NULL);
}
/*
* Security xattr namespace handlers.
*/
xattr_handler_t zpl_xattr_security_handler = {
.prefix = XATTR_SECURITY_PREFIX,
.list = zpl_xattr_security_list,
.get = zpl_xattr_security_get,
.set = zpl_xattr_security_set,
};
/*
* Extended system attributes
*
* "Extended system attributes are used by the kernel to store system
* objects such as Access Control Lists. Read and write access permissions
* to system attributes depend on the policy implemented for each system
* attribute implemented by filesystems in the kernel." - xattr(7)
*/
#ifdef CONFIG_FS_POSIX_ACL
static int
zpl_set_acl_impl(struct inode *ip, struct posix_acl *acl, int type)
{
char *name, *value = NULL;
int error = 0;
size_t size = 0;
if (S_ISLNK(ip->i_mode))
return (-EOPNOTSUPP);
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
if (acl) {
umode_t mode = ip->i_mode;
error = posix_acl_equiv_mode(acl, &mode);
if (error < 0) {
return (error);
} else {
/*
* The mode bits will have been set by
* ->zfs_setattr()->zfs_acl_chmod_setattr()
* using the ZFS ACL conversion. If they
* differ from the Posix ACL conversion dirty
* the inode to write the Posix mode bits.
*/
if (ip->i_mode != mode) {
ip->i_mode = mode;
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
}
if (error == 0)
acl = NULL;
}
}
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
if (!S_ISDIR(ip->i_mode))
return (acl ? -EACCES : 0);
break;
default:
return (-EINVAL);
}
if (acl) {
size = posix_acl_xattr_size(acl->a_count);
value = kmem_alloc(size, KM_SLEEP);
error = zpl_acl_to_xattr(acl, value, size);
if (error < 0) {
kmem_free(value, size);
return (error);
}
}
error = zpl_xattr_set(ip, name, value, size, 0);
if (value)
kmem_free(value, size);
if (!error) {
if (acl)
zpl_set_cached_acl(ip, type, acl);
else
zpl_forget_cached_acl(ip, type);
}
return (error);
}
#ifdef HAVE_SET_ACL
int
#ifdef HAVE_SET_ACL_USERNS
zpl_set_acl(struct user_namespace *userns, struct inode *ip,
struct posix_acl *acl, int type)
#else
zpl_set_acl(struct inode *ip, struct posix_acl *acl, int type)
#endif /* HAVE_SET_ACL_USERNS */
{
return (zpl_set_acl_impl(ip, acl, type));
}
#endif /* HAVE_SET_ACL */
-struct posix_acl *
-zpl_get_acl(struct inode *ip, int type)
+static struct posix_acl *
+zpl_get_acl_impl(struct inode *ip, int type)
{
struct posix_acl *acl;
void *value = NULL;
char *name;
- int size;
/*
* As of Linux 3.14, the kernel get_acl will check this for us.
* Also as of Linux 4.7, comparing against ACL_NOT_CACHED is wrong
* as the kernel get_acl will set it to temporary sentinel value.
*/
#ifndef HAVE_KERNEL_GET_ACL_HANDLE_CACHE
acl = get_cached_acl(ip, type);
if (acl != ACL_NOT_CACHED)
return (acl);
#endif
switch (type) {
case ACL_TYPE_ACCESS:
name = XATTR_NAME_POSIX_ACL_ACCESS;
break;
case ACL_TYPE_DEFAULT:
name = XATTR_NAME_POSIX_ACL_DEFAULT;
break;
default:
return (ERR_PTR(-EINVAL));
}
- size = zpl_xattr_get(ip, name, NULL, 0);
+ int size = zpl_xattr_get(ip, name, NULL, 0);
if (size > 0) {
value = kmem_alloc(size, KM_SLEEP);
size = zpl_xattr_get(ip, name, value, size);
}
if (size > 0) {
acl = zpl_acl_from_xattr(value, size);
} else if (size == -ENODATA || size == -ENOSYS) {
acl = NULL;
} else {
acl = ERR_PTR(-EIO);
}
if (size > 0)
kmem_free(value, size);
/* As of Linux 4.7, the kernel get_acl will set this for us */
#ifndef HAVE_KERNEL_GET_ACL_HANDLE_CACHE
if (!IS_ERR(acl))
zpl_set_cached_acl(ip, type, acl);
#endif
return (acl);
}
+#if defined(HAVE_GET_ACL_RCU)
+struct posix_acl *
+zpl_get_acl(struct inode *ip, int type, bool rcu)
+{
+ if (rcu)
+ return (ERR_PTR(-ECHILD));
+
+ return (zpl_get_acl_impl(ip, type));
+}
+#elif defined(HAVE_GET_ACL)
+struct posix_acl *
+zpl_get_acl(struct inode *ip, int type)
+{
+ return (zpl_get_acl_impl(ip, type));
+}
+#else
+#error "Unsupported iops->get_acl() implementation"
+#endif /* HAVE_GET_ACL_RCU */
+
int
zpl_init_acl(struct inode *ip, struct inode *dir)
{
struct posix_acl *acl = NULL;
int error = 0;
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (!S_ISLNK(ip->i_mode)) {
- acl = zpl_get_acl(dir, ACL_TYPE_DEFAULT);
+ acl = zpl_get_acl_impl(dir, ACL_TYPE_DEFAULT);
if (IS_ERR(acl))
return (PTR_ERR(acl));
if (!acl) {
ip->i_mode &= ~current_umask();
ip->i_ctime = current_time(ip);
zfs_mark_inode_dirty(ip);
return (0);
}
}
if (acl) {
umode_t mode;
if (S_ISDIR(ip->i_mode)) {
error = zpl_set_acl_impl(ip, acl, ACL_TYPE_DEFAULT);
if (error)
goto out;
}
mode = ip->i_mode;
error = __posix_acl_create(&acl, GFP_KERNEL, &mode);
if (error >= 0) {
ip->i_mode = mode;
zfs_mark_inode_dirty(ip);
if (error > 0) {
error = zpl_set_acl_impl(ip, acl,
ACL_TYPE_ACCESS);
}
}
}
out:
zpl_posix_acl_release(acl);
return (error);
}
int
zpl_chmod_acl(struct inode *ip)
{
struct posix_acl *acl;
int error;
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (S_ISLNK(ip->i_mode))
return (-EOPNOTSUPP);
- acl = zpl_get_acl(ip, ACL_TYPE_ACCESS);
+ acl = zpl_get_acl_impl(ip, ACL_TYPE_ACCESS);
if (IS_ERR(acl) || !acl)
return (PTR_ERR(acl));
error = __posix_acl_chmod(&acl, GFP_KERNEL, ip->i_mode);
if (!error)
error = zpl_set_acl_impl(ip, acl, ACL_TYPE_ACCESS);
zpl_posix_acl_release(acl);
return (error);
}
static int
__zpl_xattr_acl_list_access(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
char *xattr_name = XATTR_NAME_POSIX_ACL_ACCESS;
size_t xattr_size = sizeof (XATTR_NAME_POSIX_ACL_ACCESS);
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (list && xattr_size <= list_size)
memcpy(list, xattr_name, xattr_size);
return (xattr_size);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_acl_list_access);
static int
__zpl_xattr_acl_list_default(struct inode *ip, char *list, size_t list_size,
const char *name, size_t name_len)
{
char *xattr_name = XATTR_NAME_POSIX_ACL_DEFAULT;
size_t xattr_size = sizeof (XATTR_NAME_POSIX_ACL_DEFAULT);
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (0);
if (list && xattr_size <= list_size)
memcpy(list, xattr_name, xattr_size);
return (xattr_size);
}
ZPL_XATTR_LIST_WRAPPER(zpl_xattr_acl_list_default);
static int
__zpl_xattr_acl_get_access(struct inode *ip, const char *name,
void *buffer, size_t size)
{
struct posix_acl *acl;
int type = ACL_TYPE_ACCESS;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
- acl = zpl_get_acl(ip, type);
+ acl = zpl_get_acl_impl(ip, type);
if (IS_ERR(acl))
return (PTR_ERR(acl));
if (acl == NULL)
return (-ENODATA);
error = zpl_acl_to_xattr(acl, buffer, size);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_acl_get_access);
static int
__zpl_xattr_acl_get_default(struct inode *ip, const char *name,
void *buffer, size_t size)
{
struct posix_acl *acl;
int type = ACL_TYPE_DEFAULT;
int error;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
- acl = zpl_get_acl(ip, type);
+ acl = zpl_get_acl_impl(ip, type);
if (IS_ERR(acl))
return (PTR_ERR(acl));
if (acl == NULL)
return (-ENODATA);
error = zpl_acl_to_xattr(acl, buffer, size);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_GET_WRAPPER(zpl_xattr_acl_get_default);
static int
__zpl_xattr_acl_set_access(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
struct posix_acl *acl;
int type = ACL_TYPE_ACCESS;
int error = 0;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EPERM);
if (value) {
acl = zpl_acl_from_xattr(value, size);
if (IS_ERR(acl))
return (PTR_ERR(acl));
else if (acl) {
error = zpl_posix_acl_valid(ip, acl);
if (error) {
zpl_posix_acl_release(acl);
return (error);
}
}
} else {
acl = NULL;
}
error = zpl_set_acl_impl(ip, acl, type);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_acl_set_access);
static int
__zpl_xattr_acl_set_default(struct inode *ip, const char *name,
const void *value, size_t size, int flags)
{
struct posix_acl *acl;
int type = ACL_TYPE_DEFAULT;
int error = 0;
/* xattr_resolve_name will do this for us if this is defined */
#ifndef HAVE_XATTR_HANDLER_NAME
if (strcmp(name, "") != 0)
return (-EINVAL);
#endif
if (ITOZSB(ip)->z_acl_type != ZFS_ACLTYPE_POSIX)
return (-EOPNOTSUPP);
if (!zpl_inode_owner_or_capable(kcred->user_ns, ip))
return (-EPERM);
if (value) {
acl = zpl_acl_from_xattr(value, size);
if (IS_ERR(acl))
return (PTR_ERR(acl));
else if (acl) {
error = zpl_posix_acl_valid(ip, acl);
if (error) {
zpl_posix_acl_release(acl);
return (error);
}
}
} else {
acl = NULL;
}
error = zpl_set_acl_impl(ip, acl, type);
zpl_posix_acl_release(acl);
return (error);
}
ZPL_XATTR_SET_WRAPPER(zpl_xattr_acl_set_default);
/*
* ACL access xattr namespace handlers.
*
* Use .name instead of .prefix when available. xattr_resolve_name will match
 * the whole name and reject anything that has .name only as a prefix.
*/
xattr_handler_t zpl_xattr_acl_access_handler =
{
#ifdef HAVE_XATTR_HANDLER_NAME
.name = XATTR_NAME_POSIX_ACL_ACCESS,
#else
.prefix = XATTR_NAME_POSIX_ACL_ACCESS,
#endif
.list = zpl_xattr_acl_list_access,
.get = zpl_xattr_acl_get_access,
.set = zpl_xattr_acl_set_access,
#if defined(HAVE_XATTR_LIST_SIMPLE) || \
defined(HAVE_XATTR_LIST_DENTRY) || \
defined(HAVE_XATTR_LIST_HANDLER)
.flags = ACL_TYPE_ACCESS,
#endif
};
/*
* ACL default xattr namespace handlers.
*
* Use .name instead of .prefix when available. xattr_resolve_name will match
 * the whole name and reject anything that has .name only as a prefix.
*/
xattr_handler_t zpl_xattr_acl_default_handler =
{
#ifdef HAVE_XATTR_HANDLER_NAME
.name = XATTR_NAME_POSIX_ACL_DEFAULT,
#else
.prefix = XATTR_NAME_POSIX_ACL_DEFAULT,
#endif
.list = zpl_xattr_acl_list_default,
.get = zpl_xattr_acl_get_default,
.set = zpl_xattr_acl_set_default,
#if defined(HAVE_XATTR_LIST_SIMPLE) || \
defined(HAVE_XATTR_LIST_DENTRY) || \
defined(HAVE_XATTR_LIST_HANDLER)
.flags = ACL_TYPE_DEFAULT,
#endif
};
#endif /* CONFIG_FS_POSIX_ACL */
xattr_handler_t *zpl_xattr_handlers[] = {
&zpl_xattr_security_handler,
&zpl_xattr_trusted_handler,
&zpl_xattr_user_handler,
#ifdef CONFIG_FS_POSIX_ACL
&zpl_xattr_acl_access_handler,
&zpl_xattr_acl_default_handler,
#endif /* CONFIG_FS_POSIX_ACL */
NULL
};
static const struct xattr_handler *
zpl_xattr_handler(const char *name)
{
if (strncmp(name, XATTR_USER_PREFIX,
XATTR_USER_PREFIX_LEN) == 0)
return (&zpl_xattr_user_handler);
if (strncmp(name, XATTR_TRUSTED_PREFIX,
XATTR_TRUSTED_PREFIX_LEN) == 0)
return (&zpl_xattr_trusted_handler);
if (strncmp(name, XATTR_SECURITY_PREFIX,
XATTR_SECURITY_PREFIX_LEN) == 0)
return (&zpl_xattr_security_handler);
#ifdef CONFIG_FS_POSIX_ACL
if (strncmp(name, XATTR_NAME_POSIX_ACL_ACCESS,
sizeof (XATTR_NAME_POSIX_ACL_ACCESS)) == 0)
return (&zpl_xattr_acl_access_handler);
if (strncmp(name, XATTR_NAME_POSIX_ACL_DEFAULT,
sizeof (XATTR_NAME_POSIX_ACL_DEFAULT)) == 0)
return (&zpl_xattr_acl_default_handler);
#endif /* CONFIG_FS_POSIX_ACL */
return (NULL);
}
#if !defined(HAVE_POSIX_ACL_RELEASE) || defined(HAVE_POSIX_ACL_RELEASE_GPL_ONLY)
struct acl_rel_struct {
struct acl_rel_struct *next;
struct posix_acl *acl;
clock_t time;
};
#define ACL_REL_GRACE (60*HZ)
#define ACL_REL_WINDOW (1*HZ)
#define ACL_REL_SCHED (ACL_REL_GRACE+ACL_REL_WINDOW)
/*
* Lockless multi-producer single-consumer fifo list.
 * Nodes are added to the tail and removed from the head. The tail pointer is
 * our synchronization point. It always points to the next pointer of the
 * last node, or to head if the list is empty.
*/
static struct acl_rel_struct *acl_rel_head = NULL;
static struct acl_rel_struct **acl_rel_tail = &acl_rel_head;
static void
zpl_posix_acl_free(void *arg)
{
struct acl_rel_struct *freelist = NULL;
struct acl_rel_struct *a;
clock_t new_time;
boolean_t refire = B_FALSE;
ASSERT3P(acl_rel_head, !=, NULL);
while (acl_rel_head) {
a = acl_rel_head;
if (ddi_get_lbolt() - a->time >= ACL_REL_GRACE) {
/*
* If a is the last node we need to reset tail, but we
* need to use cmpxchg to make sure it is still the
* last node.
*/
if (acl_rel_tail == &a->next) {
acl_rel_head = NULL;
if (cmpxchg(&acl_rel_tail, &a->next,
&acl_rel_head) == &a->next) {
ASSERT3P(a->next, ==, NULL);
a->next = freelist;
freelist = a;
break;
}
}
/*
 * a is not the last node; make sure its next pointer has been set
 * by the adder, then advance the head.
*/
while (READ_ONCE(a->next) == NULL)
cpu_relax();
acl_rel_head = a->next;
a->next = freelist;
freelist = a;
} else {
/*
 * a is still in its grace period. We are responsible for
 * rescheduling the free task, since the adder will only do
 * so if the list is empty.
*/
new_time = a->time + ACL_REL_SCHED;
refire = B_TRUE;
break;
}
}
if (refire)
taskq_dispatch_delay(system_delay_taskq, zpl_posix_acl_free,
NULL, TQ_SLEEP, new_time);
while (freelist) {
a = freelist;
freelist = a->next;
kfree(a->acl);
kmem_free(a, sizeof (struct acl_rel_struct));
}
}
void
zpl_posix_acl_release_impl(struct posix_acl *acl)
{
struct acl_rel_struct *a, **prev;
a = kmem_alloc(sizeof (struct acl_rel_struct), KM_SLEEP);
a->next = NULL;
a->acl = acl;
a->time = ddi_get_lbolt();
/* atomically point tail at us and get the previous tail */
prev = xchg(&acl_rel_tail, &a->next);
ASSERT3P(*prev, ==, NULL);
*prev = a;
/* if it was empty before, schedule the free task */
if (prev == &acl_rel_head)
taskq_dispatch_delay(system_delay_taskq, zpl_posix_acl_free,
NULL, TQ_SLEEP, ddi_get_lbolt() + ACL_REL_SCHED);
}
#endif
diff --git a/sys/contrib/openzfs/module/zfs/abd.c b/sys/contrib/openzfs/module/zfs/abd.c
index f306c7a1dcca..bf39cd613330 100644
--- a/sys/contrib/openzfs/module/zfs/abd.c
+++ b/sys/contrib/openzfs/module/zfs/abd.c
@@ -1,1216 +1,1216 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Chunwei Chen. All rights reserved.
* Copyright (c) 2019 by Delphix. All rights reserved.
*/
/*
* ARC buffer data (ABD).
*
* ABDs are an abstract data structure for the ARC which can use two
* different ways of storing the underlying data:
*
* (a) Linear buffer. In this case, all the data in the ABD is stored in one
* contiguous buffer in memory (from a zio_[data_]buf_* kmem cache).
*
* +-------------------+
* | ABD (linear) |
* | abd_flags = ... |
* | abd_size = ... | +--------------------------------+
* | abd_buf ------------->| raw buffer of size abd_size |
* +-------------------+ +--------------------------------+
* no abd_chunks
*
* (b) Scattered buffer. In this case, the data in the ABD is split into
* equal-sized chunks (from the abd_chunk_cache kmem_cache), with pointers
* to the chunks recorded in an array at the end of the ABD structure.
*
* +-------------------+
* | ABD (scattered) |
* | abd_flags = ... |
* | abd_size = ... |
* | abd_offset = 0 | +-----------+
* | abd_chunks[0] ----------------------------->| chunk 0 |
* | abd_chunks[1] ---------------------+ +-----------+
* | ... | | +-----------+
* | abd_chunks[N-1] ---------+ +------->| chunk 1 |
* +-------------------+ | +-----------+
* | ...
* | +-----------+
* +----------------->| chunk N-1 |
* +-----------+
*
* In addition to directly allocating a linear or scattered ABD, it is also
* possible to create an ABD by requesting the "sub-ABD" starting at an offset
* within an existing ABD. In linear buffers this is simple (set abd_buf of
* the new ABD to the starting point within the original raw buffer), but
* scattered ABDs are a little more complex. The new ABD makes a copy of the
* relevant abd_chunks pointers (but not the underlying data). However, to
* provide arbitrary rather than only chunk-aligned starting offsets, it also
* tracks an abd_offset field which represents the starting point of the data
* within the first chunk in abd_chunks. For both linear and scattered ABDs,
* creating an offset ABD marks the original ABD as the offset's parent, and the
* original ABD's abd_children refcount is incremented. This data allows us to
* ensure the root ABD isn't deleted before its children.
*
* Most consumers should never need to know what type of ABD they're using --
* the ABD public API ensures that it's possible to transparently switch from
* using a linear ABD to a scattered one when doing so would be beneficial.
*
* If you need to use the data within an ABD directly, if you know it's linear
* (because you allocated it) you can use abd_to_buf() to access the underlying
* raw buffer. Otherwise, you should use one of the abd_borrow_buf* functions
* which will allocate a raw buffer if necessary. Use the abd_return_buf*
* functions to return any raw buffers that are no longer necessary when you're
* done using them.
*
* There are a variety of ABD APIs that implement basic buffer operations:
* compare, copy, read, write, and fill with zeroes. If you need a custom
* function which progressively accesses the whole ABD, use the abd_iterate_*
* functions.
*
* As an additional feature, linear and scatter ABD's can be stitched together
* by using the gang ABD type (abd_alloc_gang_abd()). This allows for
* multiple ABDs to be viewed as a singular ABD.
*
* It is possible to make all ABDs linear by setting zfs_abd_scatter_enabled to
* B_FALSE.
*/
#include <sys/abd_impl.h>
#include <sys/param.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
/* see block comment above for description */
int zfs_abd_scatter_enabled = B_TRUE;
void
abd_verify(abd_t *abd)
{
#ifdef ZFS_DEBUG
ASSERT3U(abd->abd_size, >, 0);
ASSERT3U(abd->abd_size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(abd->abd_flags, ==, abd->abd_flags & (ABD_FLAG_LINEAR |
ABD_FLAG_OWNER | ABD_FLAG_META | ABD_FLAG_MULTI_ZONE |
ABD_FLAG_MULTI_CHUNK | ABD_FLAG_LINEAR_PAGE | ABD_FLAG_GANG |
ABD_FLAG_GANG_FREE | ABD_FLAG_ZEROS | ABD_FLAG_ALLOCD));
IMPLY(abd->abd_parent != NULL, !(abd->abd_flags & ABD_FLAG_OWNER));
IMPLY(abd->abd_flags & ABD_FLAG_META, abd->abd_flags & ABD_FLAG_OWNER);
if (abd_is_linear(abd)) {
ASSERT3P(ABD_LINEAR_BUF(abd), !=, NULL);
} else if (abd_is_gang(abd)) {
uint_t child_sizes = 0;
for (abd_t *cabd = list_head(&ABD_GANG(abd).abd_gang_chain);
cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
ASSERT(list_link_active(&cabd->abd_gang_link));
child_sizes += cabd->abd_size;
abd_verify(cabd);
}
ASSERT3U(abd->abd_size, ==, child_sizes);
} else {
abd_verify_scatter(abd);
}
#endif
}
static void
abd_init_struct(abd_t *abd)
{
list_link_init(&abd->abd_gang_link);
mutex_init(&abd->abd_mtx, NULL, MUTEX_DEFAULT, NULL);
abd->abd_flags = 0;
#ifdef ZFS_DEBUG
zfs_refcount_create(&abd->abd_children);
abd->abd_parent = NULL;
#endif
abd->abd_size = 0;
}
static void
abd_fini_struct(abd_t *abd)
{
mutex_destroy(&abd->abd_mtx);
ASSERT(!list_link_active(&abd->abd_gang_link));
#ifdef ZFS_DEBUG
zfs_refcount_destroy(&abd->abd_children);
#endif
}
abd_t *
abd_alloc_struct(size_t size)
{
abd_t *abd = abd_alloc_struct_impl(size);
abd_init_struct(abd);
abd->abd_flags |= ABD_FLAG_ALLOCD;
return (abd);
}
void
abd_free_struct(abd_t *abd)
{
abd_fini_struct(abd);
abd_free_struct_impl(abd);
}
/*
* Allocate an ABD, along with its own underlying data buffers. Use this if you
* don't care whether the ABD is linear or not.
*/
abd_t *
abd_alloc(size_t size, boolean_t is_metadata)
{
if (abd_size_alloc_linear(size))
return (abd_alloc_linear(size, is_metadata));
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd_t *abd = abd_alloc_struct(size);
abd->abd_flags |= ABD_FLAG_OWNER;
abd->abd_u.abd_scatter.abd_offset = 0;
abd_alloc_chunks(abd, size);
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
abd_update_scatter_stats(abd, ABDSTAT_INCR);
return (abd);
}
/*
* Allocate an ABD that must be linear, along with its own underlying data
* buffer. Only use this when it would be very annoying to write your ABD
* consumer with a scattered ABD.
*/
abd_t *
abd_alloc_linear(size_t size, boolean_t is_metadata)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
abd->abd_flags |= ABD_FLAG_LINEAR | ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd->abd_size = size;
if (is_metadata) {
ABD_LINEAR_BUF(abd) = zio_buf_alloc(size);
} else {
ABD_LINEAR_BUF(abd) = zio_data_buf_alloc(size);
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
return (abd);
}
static void
abd_free_linear(abd_t *abd)
{
if (abd_is_linear_page(abd)) {
abd_free_linear_page(abd);
return;
}
if (abd->abd_flags & ABD_FLAG_META) {
zio_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
} else {
zio_data_buf_free(ABD_LINEAR_BUF(abd), abd->abd_size);
}
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
static void
abd_free_gang(abd_t *abd)
{
ASSERT(abd_is_gang(abd));
abd_t *cabd;
while ((cabd = list_head(&ABD_GANG(abd).abd_gang_chain)) != NULL) {
/*
* We must acquire the child ABDs mutex to ensure that if it
* is being added to another gang ABD we will set the link
* as inactive when removing it from this gang ABD and before
* adding it to the other gang ABD.
*/
mutex_enter(&cabd->abd_mtx);
ASSERT(list_link_active(&cabd->abd_gang_link));
list_remove(&ABD_GANG(abd).abd_gang_chain, cabd);
mutex_exit(&cabd->abd_mtx);
if (cabd->abd_flags & ABD_FLAG_GANG_FREE)
abd_free(cabd);
}
list_destroy(&ABD_GANG(abd).abd_gang_chain);
}
static void
abd_free_scatter(abd_t *abd)
{
abd_free_chunks(abd);
abd_update_scatter_stats(abd, ABDSTAT_DECR);
}
/*
* Free an ABD. Use with any kind of abd: those created with abd_alloc_*()
* and abd_get_*(), including abd_get_offset_struct().
*
* If the ABD was created with abd_alloc_*(), the underlying data
* (scatterlist or linear buffer) will also be freed. (Subject to ownership
* changes via abd_*_ownership_of_buf().)
*
* Unless the ABD was created with abd_get_offset_struct(), the abd_t will
* also be freed.
*/
void
abd_free(abd_t *abd)
{
if (abd == NULL)
return;
abd_verify(abd);
#ifdef ZFS_DEBUG
IMPLY(abd->abd_flags & ABD_FLAG_OWNER, abd->abd_parent == NULL);
#endif
if (abd_is_gang(abd)) {
abd_free_gang(abd);
} else if (abd_is_linear(abd)) {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_linear(abd);
} else {
if (abd->abd_flags & ABD_FLAG_OWNER)
abd_free_scatter(abd);
}
#ifdef ZFS_DEBUG
if (abd->abd_parent != NULL) {
(void) zfs_refcount_remove_many(&abd->abd_parent->abd_children,
abd->abd_size, abd);
}
#endif
abd_fini_struct(abd);
if (abd->abd_flags & ABD_FLAG_ALLOCD)
abd_free_struct_impl(abd);
}
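/*
 * A minimal usage sketch of the allocation/free contract described above.
 * example_abd_roundtrip() is a hypothetical helper, not part of the ABD API;
 * it only strings together the public calls documented in this file.
 */
static void
example_abd_roundtrip(const void *src, void *dst, size_t size)
{
	/* May return either a linear or a scattered ABD. */
	abd_t *abd = abd_alloc(size, B_FALSE);

	/* Fill the ABD from a raw buffer, then read the data back out. */
	abd_copy_from_buf(abd, src, size);
	abd_copy_to_buf(dst, abd, size);

	/* Releases the underlying chunks and the abd_t itself. */
	abd_free(abd);
}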
/*
* Allocate an ABD of the same format (same metadata flag, same scatterize
* setting) as another ABD.
*/
abd_t *
abd_alloc_sametype(abd_t *sabd, size_t size)
{
boolean_t is_metadata = (sabd->abd_flags & ABD_FLAG_META) != 0;
if (abd_is_linear(sabd) &&
!abd_is_linear_page(sabd)) {
return (abd_alloc_linear(size, is_metadata));
} else {
return (abd_alloc(size, is_metadata));
}
}
/*
 * Create a gang ABD that will be the head of a list of ABDs. This is used
 * to "chain" scatter/gather lists together when constructing aggregated
 * I/Os. To free this abd, abd_free() must be called.
*/
abd_t *
abd_alloc_gang(void)
{
abd_t *abd = abd_alloc_struct(0);
abd->abd_flags |= ABD_FLAG_GANG | ABD_FLAG_OWNER;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
return (abd);
}
/*
* Add a child gang ABD to a parent gang ABDs chained list.
*/
static void
abd_gang_add_gang(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
ASSERT(abd_is_gang(cabd));
if (free_on_free) {
/*
* If the parent is responsible for freeing the child gang
* ABD we will just splice the child's children ABD list to
* the parent's list and immediately free the child gang ABD
 * struct. The children that came from the child gang ABD will
 * retain all of their free_on_free settings after being added to
 * the parent's list.
*/
pabd->abd_size += cabd->abd_size;
list_move_tail(&ABD_GANG(pabd).abd_gang_chain,
&ABD_GANG(cabd).abd_gang_chain);
ASSERT(list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
abd_verify(pabd);
abd_free(cabd);
} else {
for (abd_t *child = list_head(&ABD_GANG(cabd).abd_gang_chain);
child != NULL;
child = list_next(&ABD_GANG(cabd).abd_gang_chain, child)) {
/*
* We always pass B_FALSE for free_on_free as it is the
 * original child gang ABD's responsibility to determine
 * if any of its child ABDs should be freed on the call
* to abd_free().
*/
abd_gang_add(pabd, child, B_FALSE);
}
abd_verify(pabd);
}
}
/*
* Add a child ABD to a gang ABD's chained list.
*/
void
abd_gang_add(abd_t *pabd, abd_t *cabd, boolean_t free_on_free)
{
ASSERT(abd_is_gang(pabd));
abd_t *child_abd = NULL;
/*
* If the child being added is a gang ABD, we will add the
* child's ABDs to the parent gang ABD. This allows us to account
* for the offset correctly in the parent gang ABD.
*/
if (abd_is_gang(cabd)) {
ASSERT(!list_link_active(&cabd->abd_gang_link));
ASSERT(!list_is_empty(&ABD_GANG(cabd).abd_gang_chain));
return (abd_gang_add_gang(pabd, cabd, free_on_free));
}
ASSERT(!abd_is_gang(cabd));
/*
* In order to verify that an ABD is not already part of
* another gang ABD, we must lock the child ABD's abd_mtx
* to check its abd_gang_link status. We unlock the abd_mtx
 * only after it has been added to a gang ABD, which
* will update the abd_gang_link's status. See comment below
* for how an ABD can be in multiple gang ABD's simultaneously.
*/
mutex_enter(&cabd->abd_mtx);
if (list_link_active(&cabd->abd_gang_link)) {
/*
* If the child ABD is already part of another
* gang ABD then we must allocate a new
* ABD to use a separate link. We mark the newly
* allocated ABD with ABD_FLAG_GANG_FREE, before
* adding it to the gang ABD's list, to make the
* gang ABD aware that it is responsible to call
* abd_free(). We use abd_get_offset() in order
* to just allocate a new ABD but avoid copying the
* data over into the newly allocated ABD.
*
* An ABD may become part of multiple gang ABD's. For
 * example, when writing ditto blocks, the same ABD
* is used to write 2 or 3 locations with 2 or 3
* zio_t's. Each of the zio's may be aggregated with
* different adjacent zio's. zio aggregation uses gang
* zio's, so the single ABD can become part of multiple
* gang zio's.
*
* The ASSERT below is to make sure that if
* free_on_free is passed as B_TRUE, the ABD can
* not be in multiple gang ABD's. The gang ABD
* can not be responsible for cleaning up the child
* ABD memory allocation if the ABD can be in
* multiple gang ABD's at one time.
*/
ASSERT3B(free_on_free, ==, B_FALSE);
child_abd = abd_get_offset(cabd, 0);
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
} else {
child_abd = cabd;
if (free_on_free)
child_abd->abd_flags |= ABD_FLAG_GANG_FREE;
}
ASSERT3P(child_abd, !=, NULL);
list_insert_tail(&ABD_GANG(pabd).abd_gang_chain, child_abd);
mutex_exit(&cabd->abd_mtx);
pabd->abd_size += child_abd->abd_size;
}
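/*
 * A short sketch of the gang ABD pattern described above. example_gang_view()
 * is a hypothetical helper: it presents two existing ABDs as one logical
 * buffer. With free_on_free = B_FALSE the caller keeps ownership of the
 * children, so abd_free() on the gang head releases only the head itself.
 */
static abd_t *
example_gang_view(abd_t *first, abd_t *second)
{
	abd_t *gang = abd_alloc_gang();

	abd_gang_add(gang, first, B_FALSE);
	abd_gang_add(gang, second, B_FALSE);

	/* gang->abd_size now covers both children, in order. */
	return (gang);
}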
/*
* Locate the ABD for the supplied offset in the gang ABD.
* Return a new offset relative to the returned ABD.
*/
abd_t *
abd_gang_get_offset(abd_t *abd, size_t *off)
{
abd_t *cabd;
ASSERT(abd_is_gang(abd));
ASSERT3U(*off, <, abd->abd_size);
for (cabd = list_head(&ABD_GANG(abd).abd_gang_chain); cabd != NULL;
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd)) {
if (*off >= cabd->abd_size)
*off -= cabd->abd_size;
else
return (cabd);
}
VERIFY3P(cabd, !=, NULL);
return (cabd);
}
/*
* Allocate a new ABD, using the provided struct (if non-NULL, and if
* circumstances allow - otherwise allocate the struct). The returned ABD will
* point to offset off of sabd. It shares the underlying buffer data with sabd.
* Use abd_free() to free. sabd must not be freed while any derived ABDs exist.
*/
static abd_t *
abd_get_offset_impl(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_verify(sabd);
ASSERT3U(off + size, <=, sabd->abd_size);
if (abd_is_linear(sabd)) {
if (abd == NULL)
abd = abd_alloc_struct(0);
/*
* Even if this buf is filesystem metadata, we only track that
* if we own the underlying data buffer, which is not true in
* this case. Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
ABD_LINEAR_BUF(abd) = (char *)ABD_LINEAR_BUF(sabd) + off;
} else if (abd_is_gang(sabd)) {
size_t left = size;
if (abd == NULL) {
abd = abd_alloc_gang();
} else {
abd->abd_flags |= ABD_FLAG_GANG;
list_create(&ABD_GANG(abd).abd_gang_chain,
sizeof (abd_t), offsetof(abd_t, abd_gang_link));
}
abd->abd_flags &= ~ABD_FLAG_OWNER;
for (abd_t *cabd = abd_gang_get_offset(sabd, &off);
cabd != NULL && left > 0;
cabd = list_next(&ABD_GANG(sabd).abd_gang_chain, cabd)) {
int csize = MIN(left, cabd->abd_size - off);
abd_t *nabd = abd_get_offset_size(cabd, off, csize);
abd_gang_add(abd, nabd, B_TRUE);
left -= csize;
off = 0;
}
ASSERT3U(left, ==, 0);
} else {
abd = abd_get_offset_scatter(abd, sabd, off, size);
}
ASSERT3P(abd, !=, NULL);
abd->abd_size = size;
#ifdef ZFS_DEBUG
abd->abd_parent = sabd;
(void) zfs_refcount_add_many(&sabd->abd_children, abd->abd_size, abd);
#endif
return (abd);
}
/*
* Like abd_get_offset_size(), but memory for the abd_t is provided by the
* caller. Using this routine can improve performance by avoiding the cost
* of allocating memory for the abd_t struct, and updating the abd stats.
* Usually, the provided abd is returned, but in some circumstances (FreeBSD,
* if sabd is scatter and size is more than 2 pages) a new abd_t may need to
* be allocated. Therefore callers should be careful to use the returned
* abd_t*.
*/
abd_t *
abd_get_offset_struct(abd_t *abd, abd_t *sabd, size_t off, size_t size)
{
abd_t *result;
abd_init_struct(abd);
result = abd_get_offset_impl(abd, sabd, off, size);
if (result != abd)
abd_fini_struct(abd);
return (result);
}
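/*
 * A hedged sketch of the caller-provided-struct pattern documented above.
 * example_peek_offset() is a hypothetical helper; the important detail is
 * that the caller must keep using the abd_t returned by
 * abd_get_offset_struct(), which may not be the storage it passed in.
 */
static void
example_peek_offset(abd_t *sabd, size_t off, size_t size, void *out)
{
	abd_t stack_abd;
	abd_t *view = abd_get_offset_struct(&stack_abd, sabd, off, size);

	abd_copy_to_buf(out, view, size);
	abd_free(view);	/* frees the struct only if it was internally allocated */
}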
abd_t *
abd_get_offset(abd_t *sabd, size_t off)
{
size_t size = sabd->abd_size > off ? sabd->abd_size - off : 0;
VERIFY3U(size, >, 0);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
abd_t *
abd_get_offset_size(abd_t *sabd, size_t off, size_t size)
{
ASSERT3U(off + size, <=, sabd->abd_size);
return (abd_get_offset_impl(NULL, sabd, off, size));
}
/*
 * Return a scatter ABD of the given size containing only zeros.
*/
abd_t *
abd_get_zeros(size_t size)
{
ASSERT3P(abd_zero_scatter, !=, NULL);
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
return (abd_get_offset_size(abd_zero_scatter, 0, size));
}
/*
* Allocate a linear ABD structure for buf.
*/
abd_t *
abd_get_from_buf(void *buf, size_t size)
{
abd_t *abd = abd_alloc_struct(0);
VERIFY3U(size, <=, SPA_MAXBLOCKSIZE);
/*
* Even if this buf is filesystem metadata, we only track that if we
* own the underlying data buffer, which is not true in this case.
* Therefore, we don't ever use ABD_FLAG_META here.
*/
abd->abd_flags |= ABD_FLAG_LINEAR;
abd->abd_size = size;
ABD_LINEAR_BUF(abd) = buf;
return (abd);
}
/*
* Get the raw buffer associated with a linear ABD.
*/
void *
abd_to_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
abd_verify(abd);
return (ABD_LINEAR_BUF(abd));
}
/*
* Borrow a raw buffer from an ABD without copying the contents of the ABD
* into the buffer. If the ABD is scattered, this will allocate a raw buffer
* whose contents are undefined. To copy over the existing data in the ABD, use
* abd_borrow_buf_copy() instead.
*/
void *
abd_borrow_buf(abd_t *abd, size_t n)
{
void *buf;
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
if (abd_is_linear(abd)) {
buf = abd_to_buf(abd);
} else {
buf = zio_buf_alloc(n);
}
#ifdef ZFS_DEBUG
(void) zfs_refcount_add_many(&abd->abd_children, n, buf);
#endif
return (buf);
}
void *
abd_borrow_buf_copy(abd_t *abd, size_t n)
{
void *buf = abd_borrow_buf(abd, n);
if (!abd_is_linear(abd)) {
abd_copy_to_buf(buf, abd, n);
}
return (buf);
}
/*
* Return a borrowed raw buffer to an ABD. If the ABD is scattered, this will
* not change the contents of the ABD and will ASSERT that you didn't modify
* the buffer since it was borrowed. If you want any changes you made to buf to
* be copied back to abd, use abd_return_buf_copy() instead.
*/
void
abd_return_buf(abd_t *abd, void *buf, size_t n)
{
abd_verify(abd);
ASSERT3U(abd->abd_size, >=, n);
if (abd_is_linear(abd)) {
ASSERT3P(buf, ==, abd_to_buf(abd));
} else {
ASSERT0(abd_cmp_buf(abd, buf, n));
zio_buf_free(buf, n);
}
#ifdef ZFS_DEBUG
(void) zfs_refcount_remove_many(&abd->abd_children, n, buf);
#endif
}
void
abd_return_buf_copy(abd_t *abd, void *buf, size_t n)
{
if (!abd_is_linear(abd)) {
abd_copy_from_buf(abd, buf, n);
}
abd_return_buf(abd, buf, n);
}
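/*
 * A minimal sketch of the borrow/return contract described above.
 * example_clear_prefix() is a hypothetical helper; it shows why a caller
 * that writes through the borrowed buffer must pair the borrow with
 * abd_return_buf_copy(), so the modification reaches a scattered ABD too.
 */
static void
example_clear_prefix(abd_t *abd, size_t n)
{
	void *buf = abd_borrow_buf_copy(abd, n);

	memset(buf, 0, n);
	abd_return_buf_copy(abd, buf, n);
}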
void
abd_release_ownership_of_buf(abd_t *abd)
{
ASSERT(abd_is_linear(abd));
ASSERT(abd->abd_flags & ABD_FLAG_OWNER);
/*
* abd_free() needs to handle LINEAR_PAGE ABD's specially.
* Since that flag does not survive the
* abd_release_ownership_of_buf() -> abd_get_from_buf() ->
* abd_take_ownership_of_buf() sequence, we don't allow releasing
* these "linear but not zio_[data_]buf_alloc()'ed" ABD's.
*/
ASSERT(!abd_is_linear_page(abd));
abd_verify(abd);
abd->abd_flags &= ~ABD_FLAG_OWNER;
/* Disable this flag since we no longer own the data buffer */
abd->abd_flags &= ~ABD_FLAG_META;
abd_update_linear_stats(abd, ABDSTAT_DECR);
}
/*
* Give this ABD ownership of the buffer that it's storing. Can only be used on
* linear ABDs which were allocated via abd_get_from_buf(), or ones allocated
* with abd_alloc_linear() which subsequently released ownership of their buf
* with abd_release_ownership_of_buf().
*/
void
abd_take_ownership_of_buf(abd_t *abd, boolean_t is_metadata)
{
ASSERT(abd_is_linear(abd));
ASSERT(!(abd->abd_flags & ABD_FLAG_OWNER));
abd_verify(abd);
abd->abd_flags |= ABD_FLAG_OWNER;
if (is_metadata) {
abd->abd_flags |= ABD_FLAG_META;
}
abd_update_linear_stats(abd, ABDSTAT_INCR);
}
/*
* Initializes an abd_iter based on whether the abd is a gang ABD
* or just a single ABD.
*/
static inline abd_t *
abd_init_abd_iter(abd_t *abd, struct abd_iter *aiter, size_t off)
{
abd_t *cabd = NULL;
if (abd_is_gang(abd)) {
cabd = abd_gang_get_offset(abd, &off);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, off);
}
} else {
abd_iter_init(aiter, abd);
abd_iter_advance(aiter, off);
}
return (cabd);
}
/*
* Advances an abd_iter. We have to be careful with gang ABD as
* advancing could mean that we are at the end of a particular ABD and
* must grab the ABD in the gang ABD's list.
*/
static inline abd_t *
abd_advance_abd_iter(abd_t *abd, abd_t *cabd, struct abd_iter *aiter,
size_t len)
{
abd_iter_advance(aiter, len);
if (abd_is_gang(abd) && abd_iter_at_end(aiter)) {
ASSERT3P(cabd, !=, NULL);
cabd = list_next(&ABD_GANG(abd).abd_gang_chain, cabd);
if (cabd) {
abd_iter_init(aiter, cabd);
abd_iter_advance(aiter, 0);
}
}
return (cabd);
}
int
abd_iterate_func(abd_t *abd, size_t off, size_t size,
abd_iter_func_t *func, void *private)
{
struct abd_iter aiter;
int ret = 0;
if (size == 0)
return (0);
abd_verify(abd);
ASSERT3U(off + size, <=, abd->abd_size);
boolean_t gang = abd_is_gang(abd);
abd_t *c_abd = abd_init_abd_iter(abd, &aiter, off);
while (size > 0) {
/* If we are at the end of the gang ABD we are done */
if (gang && !c_abd)
break;
abd_iter_map(&aiter);
size_t len = MIN(aiter.iter_mapsize, size);
ASSERT3U(len, >, 0);
ret = func(aiter.iter_mapaddr, len, private);
abd_iter_unmap(&aiter);
if (ret != 0)
break;
size -= len;
c_abd = abd_advance_abd_iter(abd, c_abd, &aiter, len);
}
return (ret);
}
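/*
 * A small sketch of a custom abd_iterate_func() callback, as suggested by
 * the "abd_iterate_*" note in the file comment. example_sum_cb() and
 * example_abd_sum() are hypothetical; the callback runs once per mapped
 * segment and a nonzero return value stops the iteration early.
 */
static int
example_sum_cb(void *buf, size_t size, void *private)
{
	uint64_t *sum = private;
	const uint8_t *p = buf;

	for (size_t i = 0; i < size; i++)
		*sum += p[i];
	return (0);
}

static uint64_t
example_abd_sum(abd_t *abd)
{
	uint64_t sum = 0;

	(void) abd_iterate_func(abd, 0, abd->abd_size, example_sum_cb, &sum);
	return (sum);
}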
struct buf_arg {
void *arg_buf;
};
static int
abd_copy_to_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(ba_ptr->arg_buf, buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy abd to buf. (off is the offset in abd.)
*/
void
abd_copy_to_buf_off(void *buf, abd_t *abd, size_t off, size_t size)
{
struct buf_arg ba_ptr = { buf };
(void) abd_iterate_func(abd, off, size, abd_copy_to_buf_off_cb,
&ba_ptr);
}
static int
abd_cmp_buf_off_cb(void *buf, size_t size, void *private)
{
int ret;
struct buf_arg *ba_ptr = private;
ret = memcmp(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (ret);
}
/*
* Compare the contents of abd to buf. (off is the offset in abd.)
*/
int
abd_cmp_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
return (abd_iterate_func(abd, off, size, abd_cmp_buf_off_cb, &ba_ptr));
}
static int
abd_copy_from_buf_off_cb(void *buf, size_t size, void *private)
{
struct buf_arg *ba_ptr = private;
(void) memcpy(buf, ba_ptr->arg_buf, size);
ba_ptr->arg_buf = (char *)ba_ptr->arg_buf + size;
return (0);
}
/*
* Copy from buf to abd. (off is the offset in abd.)
*/
void
abd_copy_from_buf_off(abd_t *abd, const void *buf, size_t off, size_t size)
{
struct buf_arg ba_ptr = { (void *) buf };
(void) abd_iterate_func(abd, off, size, abd_copy_from_buf_off_cb,
&ba_ptr);
}
/*ARGSUSED*/
static int
abd_zero_off_cb(void *buf, size_t size, void *private)
{
(void) memset(buf, 0, size);
return (0);
}
/*
* Zero out the abd from a particular offset to the end.
*/
void
abd_zero_off(abd_t *abd, size_t off, size_t size)
{
(void) abd_iterate_func(abd, off, size, abd_zero_off_cb, NULL);
}
/*
* Iterate over two ABDs and call func incrementally on the two ABDs' data in
* equal-sized chunks (passed to func as raw buffers). func could be called many
* times during this iteration.
*/
int
abd_iterate_func2(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff,
size_t size, abd_iter_func2_t *func, void *private)
{
int ret = 0;
struct abd_iter daiter, saiter;
boolean_t dabd_is_gang_abd, sabd_is_gang_abd;
abd_t *c_dabd, *c_sabd;
if (size == 0)
return (0);
abd_verify(dabd);
abd_verify(sabd);
ASSERT3U(doff + size, <=, dabd->abd_size);
ASSERT3U(soff + size, <=, sabd->abd_size);
dabd_is_gang_abd = abd_is_gang(dabd);
sabd_is_gang_abd = abd_is_gang(sabd);
c_dabd = abd_init_abd_iter(dabd, &daiter, doff);
c_sabd = abd_init_abd_iter(sabd, &saiter, soff);
while (size > 0) {
/* if we are at the end of the gang ABD we are done */
if ((dabd_is_gang_abd && !c_dabd) ||
(sabd_is_gang_abd && !c_sabd))
break;
abd_iter_map(&daiter);
abd_iter_map(&saiter);
size_t dlen = MIN(daiter.iter_mapsize, size);
size_t slen = MIN(saiter.iter_mapsize, size);
size_t len = MIN(dlen, slen);
ASSERT(dlen > 0 || slen > 0);
ret = func(daiter.iter_mapaddr, saiter.iter_mapaddr, len,
private);
abd_iter_unmap(&saiter);
abd_iter_unmap(&daiter);
if (ret != 0)
break;
size -= len;
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter, len);
c_sabd =
abd_advance_abd_iter(sabd, c_sabd, &saiter, len);
}
return (ret);
}
/*ARGSUSED*/
static int
abd_copy_off_cb(void *dbuf, void *sbuf, size_t size, void *private)
{
(void) memcpy(dbuf, sbuf, size);
return (0);
}
/*
* Copy from sabd to dabd starting from soff and doff.
*/
void
abd_copy_off(abd_t *dabd, abd_t *sabd, size_t doff, size_t soff, size_t size)
{
(void) abd_iterate_func2(dabd, sabd, doff, soff, size,
abd_copy_off_cb, NULL);
}
/*ARGSUSED*/
static int
abd_cmp_cb(void *bufa, void *bufb, size_t size, void *private)
{
return (memcmp(bufa, bufb, size));
}
/*
* Compares the contents of two ABDs.
*/
int
abd_cmp(abd_t *dabd, abd_t *sabd)
{
ASSERT3U(dabd->abd_size, ==, sabd->abd_size);
return (abd_iterate_func2(dabd, sabd, 0, 0, dabd->abd_size,
abd_cmp_cb, NULL));
}
/*
* Iterate over code ABDs and a data ABD and call @func_raidz_gen.
*
* @cabds parity ABDs, must have equal size
* @dabd data ABD. Can be NULL (in this case @dsize = 0)
* @func_raidz_gen should be implemented so that its behaviour
* is the same when taking linear and when taking scatter
*/
void
abd_raidz_gen_iterate(abd_t **cabds, abd_t *dabd,
ssize_t csize, ssize_t dsize, const unsigned parity,
void (*func_raidz_gen)(void **, const void *, size_t, size_t))
{
int i;
ssize_t len, dlen;
struct abd_iter caiters[3];
struct abd_iter daiter = {0};
void *caddrs[3];
unsigned long flags __maybe_unused = 0;
abd_t *c_cabds[3];
abd_t *c_dabd = NULL;
boolean_t cabds_is_gang_abd[3];
boolean_t dabd_is_gang_abd = B_FALSE;
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
c_cabds[i] = abd_init_abd_iter(cabds[i], &caiters[i], 0);
}
if (dabd) {
dabd_is_gang_abd = abd_is_gang(dabd);
c_dabd = abd_init_abd_iter(dabd, &daiter, 0);
}
ASSERT3S(dsize, >=, 0);
abd_enter_critical(flags);
while (csize > 0) {
/* if we are at the end of the gang ABD we are done */
if (dabd_is_gang_abd && !c_dabd)
break;
for (i = 0; i < parity; i++) {
/*
* If we are at the end of the gang ABD we are
* done.
*/
if (cabds_is_gang_abd[i] && !c_cabds[i])
break;
abd_iter_map(&caiters[i]);
caddrs[i] = caiters[i].iter_mapaddr;
}
len = csize;
if (dabd && dsize > 0)
abd_iter_map(&daiter);
switch (parity) {
case 3:
len = MIN(caiters[2].iter_mapsize, len);
- /* falls through */
+ fallthrough;
case 2:
len = MIN(caiters[1].iter_mapsize, len);
- /* falls through */
+ fallthrough;
case 1:
len = MIN(caiters[0].iter_mapsize, len);
}
/* must be progressive */
ASSERT3S(len, >, 0);
if (dabd && dsize > 0) {
/* this needs precise iter.length */
len = MIN(daiter.iter_mapsize, len);
dlen = len;
} else
dlen = 0;
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if each
 * segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_gen(caddrs, daiter.iter_mapaddr, len, dlen);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&caiters[i]);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&caiters[i], len);
}
if (dabd && dsize > 0) {
abd_iter_unmap(&daiter);
c_dabd =
abd_advance_abd_iter(dabd, c_dabd, &daiter,
dlen);
dsize -= dlen;
}
csize -= len;
ASSERT3S(dsize, >=, 0);
ASSERT3S(csize, >=, 0);
}
abd_exit_critical(flags);
}
/*
* Iterate over code ABDs and data reconstruction target ABDs and call
* @func_raidz_rec. Function maps at most 6 pages atomically.
*
* @cabds parity ABDs, must have equal size
* @tabds rec target ABDs, at most 3
* @tsize size of data target columns
* @func_raidz_rec expects syndrome data in target columns. Function
* reconstructs data and overwrites target columns.
*/
void
abd_raidz_rec_iterate(abd_t **cabds, abd_t **tabds,
ssize_t tsize, const unsigned parity,
void (*func_raidz_rec)(void **t, const size_t tsize, void **c,
const unsigned *mul),
const unsigned *mul)
{
int i;
ssize_t len;
struct abd_iter citers[3];
struct abd_iter xiters[3];
void *caddrs[3], *xaddrs[3];
unsigned long flags __maybe_unused = 0;
boolean_t cabds_is_gang_abd[3];
boolean_t tabds_is_gang_abd[3];
abd_t *c_cabds[3];
abd_t *c_tabds[3];
ASSERT3U(parity, <=, 3);
for (i = 0; i < parity; i++) {
cabds_is_gang_abd[i] = abd_is_gang(cabds[i]);
tabds_is_gang_abd[i] = abd_is_gang(tabds[i]);
c_cabds[i] =
abd_init_abd_iter(cabds[i], &citers[i], 0);
c_tabds[i] =
abd_init_abd_iter(tabds[i], &xiters[i], 0);
}
abd_enter_critical(flags);
while (tsize > 0) {
for (i = 0; i < parity; i++) {
/*
* If we are at the end of the gang ABD we
* are done.
*/
if (cabds_is_gang_abd[i] && !c_cabds[i])
break;
if (tabds_is_gang_abd[i] && !c_tabds[i])
break;
abd_iter_map(&citers[i]);
abd_iter_map(&xiters[i]);
caddrs[i] = citers[i].iter_mapaddr;
xaddrs[i] = xiters[i].iter_mapaddr;
}
len = tsize;
switch (parity) {
case 3:
len = MIN(xiters[2].iter_mapsize, len);
len = MIN(citers[2].iter_mapsize, len);
- /* falls through */
+ fallthrough;
case 2:
len = MIN(xiters[1].iter_mapsize, len);
len = MIN(citers[1].iter_mapsize, len);
- /* falls through */
+ fallthrough;
case 1:
len = MIN(xiters[0].iter_mapsize, len);
len = MIN(citers[0].iter_mapsize, len);
}
/* must be progressive */
ASSERT3S(len, >, 0);
/*
* The iterated function likely will not do well if each
 * segment except the last one is not a multiple of 512 (raidz).
*/
ASSERT3U(((uint64_t)len & 511ULL), ==, 0);
func_raidz_rec(xaddrs, len, caddrs, mul);
for (i = parity-1; i >= 0; i--) {
abd_iter_unmap(&xiters[i]);
abd_iter_unmap(&citers[i]);
c_tabds[i] =
abd_advance_abd_iter(tabds[i], c_tabds[i],
&xiters[i], len);
c_cabds[i] =
abd_advance_abd_iter(cabds[i], c_cabds[i],
&citers[i], len);
}
tsize -= len;
ASSERT3S(tsize, >=, 0);
}
abd_exit_critical(flags);
}
diff --git a/sys/contrib/openzfs/module/zfs/arc.c b/sys/contrib/openzfs/module/zfs/arc.c
index 227d0417c765..6acd3631348e 100644
--- a/sys/contrib/openzfs/module/zfs/arc.c
+++ b/sys/contrib/openzfs/module/zfs/arc.c
@@ -1,11110 +1,11116 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc.
* Copyright (c) 2011, 2020, Delphix. All rights reserved.
* Copyright (c) 2014, Saso Kiselkov. All rights reserved.
* Copyright (c) 2017, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2019, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2020, George Amanakis. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2020, The FreeBSD Foundation [1]
*
* [1] Portions of this software were developed by Allan Jude
* under sponsorship from the FreeBSD Foundation.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache from growing unbounded at these times, we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use: mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
 * It is also possible to register a callback which is run when the
* arc_meta_limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed and the arc_meta_limit honored. For example,
 * when using the ZPL each dentry holds a reference on a znode. These
* dentries must be pruned before the arc buffer holding the znode can
* be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
* that is in this state cannot access the data directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list, however a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will bcopy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled that the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
* data that is just compressed. The biggest difference is that it is not
* possible to decrypt encrypted data (or vice-versa) if the keys aren't loaded.
* The other difference is that encryption cannot be treated as a suggestion.
* If a caller would prefer compressed data, but they actually wind up with
* uncompressed data the worst thing that could happen is there might be a
* performance hit. If the caller requests encrypted data, however, we must be
* sure they actually get it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/zfs_refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/zthr.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_zfs.h>
#include <sys/aggsum.h>
#include <sys/wmsum.h>
#include <cityhash.h>
#include <sys/vdev_trim.h>
#include <sys/zfs_racct.h>
#include <sys/zstd/zstd.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
/*
* This thread's job is to keep enough free memory in the system, by
* calling arc_kmem_reap_soon() plus arc_reduce_target_size(), which improves
* arc_available_memory().
*/
static zthr_t *arc_reap_zthr;
/*
* This thread's job is to keep arc_size under arc_c, by calling
* arc_evict(), which improves arc_is_overflowing().
*/
static zthr_t *arc_evict_zthr;
static kmutex_t arc_evict_lock;
static boolean_t arc_evict_needed = B_FALSE;
/*
* Count of bytes evicted since boot.
*/
static uint64_t arc_evict_count;
/*
* List of arc_evict_waiter_t's, representing threads waiting for the
* arc_evict_count to reach specific values.
*/
static list_t arc_evict_waiters;
/*
* When arc_is_overflowing(), arc_get_data_impl() waits for this percent of
* the requested amount of data to be evicted. For example, by default for
* every 2KB that's evicted, 1KB of it may be "reused" by a new allocation.
* Since this is above 100%, it ensures that progress is made towards getting
* arc_size under arc_c. Since this is finite, it ensures that allocations
* can still happen, even during the potentially long time that arc_size is
* more than arc_c.
*/
int zfs_arc_eviction_pct = 200;
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
int zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
int arc_grow_retry = 5;
/*
* Minimum time between calls to arc_kmem_reap_soon().
*/
int arc_kmem_cache_reap_retry_ms = 1000;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;
/* shift of arc_c for calculating both min and max arc_p */
int arc_p_min_shift = 4;
/* log2(fraction of arc to reclaim) */
int arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
* I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
int arc_no_grow_shift = 5;
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static int arc_min_prefetch_ms;
static int arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
int arc_lotsfree_percent = 10;
/*
* The arc has filled available memory and has now warmed up.
*/
boolean_t arc_warm;
/*
* These tunables are for performance analysis.
*/
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
unsigned long zfs_arc_meta_min = 0;
unsigned long zfs_arc_dnode_limit = 0;
unsigned long zfs_arc_dnode_reduce_percent = 10;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
/*
* ARC dirty data constraints for arc_tempreserve_space() throttle.
*/
unsigned long zfs_arc_dirty_limit_percent = 50; /* total dirty data limit */
unsigned long zfs_arc_anon_limit_percent = 25; /* anon block dirty limit */
unsigned long zfs_arc_pool_dirty_percent = 20; /* each pool's anon allowance */
/*
* Enable or disable compressed arc buffers.
*/
int zfs_compressed_arc_enabled = B_TRUE;
/*
* ARC will evict meta buffers that exceed arc_meta_limit. This
 * tunable makes arc_meta_limit adjustable for different workloads.
*/
unsigned long zfs_arc_meta_limit_percent = 75;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
unsigned long zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux specific
*/
unsigned long zfs_arc_sys_free = 0;
int zfs_arc_min_prefetch_ms = 0;
int zfs_arc_min_prescient_prefetch_ms = 0;
int zfs_arc_p_dampener_disable = 1;
int zfs_arc_meta_prune = 10000;
int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
int zfs_arc_meta_adjust_restarts = 4096;
int zfs_arc_lotsfree_percent = 10;
/* The 6 states: */
arc_state_t ARC_anon;
arc_state_t ARC_mru;
arc_state_t ARC_mru_ghost;
arc_state_t ARC_mfu;
arc_state_t ARC_mfu_ghost;
arc_state_t ARC_l2c_only;
arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mfu", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible_mru", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "p", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
#if defined(COMPAT_FREEBSD11)
{ "other_size", KSTAT_DATA_UINT64 },
#endif
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_prefetch_asize", KSTAT_DATA_UINT64 },
{ "l2_mru_asize", KSTAT_DATA_UINT64 },
{ "l2_mfu_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_data_asize", KSTAT_DATA_UINT64 },
{ "l2_bufc_metadata_asize", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "l2_log_blk_writes", KSTAT_DATA_UINT64 },
{ "l2_log_blk_avg_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_asize", KSTAT_DATA_UINT64 },
{ "l2_log_blk_count", KSTAT_DATA_UINT64 },
{ "l2_data_to_meta_ratio", KSTAT_DATA_UINT64 },
{ "l2_rebuild_success", KSTAT_DATA_UINT64 },
{ "l2_rebuild_unsupported", KSTAT_DATA_UINT64 },
{ "l2_rebuild_io_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_dh_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_cksum_lb_errors", KSTAT_DATA_UINT64 },
{ "l2_rebuild_lowmem", KSTAT_DATA_UINT64 },
{ "l2_rebuild_size", KSTAT_DATA_UINT64 },
{ "l2_rebuild_asize", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs", KSTAT_DATA_UINT64 },
{ "l2_rebuild_bufs_precached", KSTAT_DATA_UINT64 },
{ "l2_rebuild_log_blks", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 },
{ "cached_only_in_progress", KSTAT_DATA_UINT64 },
{ "abd_chunk_waste_size", KSTAT_DATA_UINT64 },
};
arc_sums_t arc_sums;
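/*
 * Raise arc_stats.stat to val without taking a lock: re-read the current
 * value and retry the compare-and-swap until either the stored value is
 * already >= val or the swap succeeds. buf_hash_insert() below uses this
 * to track arcstat_hash_chain_max and arcstat_hash_elements_max across
 * concurrent inserters.
 */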
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
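/*
 * For illustration, a call such as
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits, matching the four hit counters in
 * arc_stats above.
 */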
/*
* This macro allows us to use kstats as floating averages. Each time we
* update this kstat, we first scale both the current value and the new
* sample by ARCSTAT_F_AVG_FACTOR, which shrinks the new sample's
* contribution to the overall average. This macro assumes that integer
* loads and stores are atomic, but
* is not safe for multiple writers updating the kstat in parallel (only the
* last writer's update will remain).
*/
#define ARCSTAT_F_AVG_FACTOR 3
#define ARCSTAT_F_AVG(stat, value) \
do { \
uint64_t x = ARCSTAT(stat); \
x = x - x / ARCSTAT_F_AVG_FACTOR + \
(value) / ARCSTAT_F_AVG_FACTOR; \
ARCSTAT(stat) = x; \
} while (0)
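/*
 * For example, with ARCSTAT_F_AVG_FACTOR of 3, an existing average of 900
 * and a new sample of 300 update the kstat to 900 - 900/3 + 300/3 = 700;
 * each new sample contributes roughly one third of its value to the
 * running average.
 */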
kstat_t *arc_ksp;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
/* max size for dnodes */
#define arc_dnode_size_limit ARCSTAT(arcstat_dnode_limit)
#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_need_free ARCSTAT(arcstat_need_free) /* waiting to be evicted */
hrtime_t arc_growtime;
list_t arc_prune_list;
kmutex_t arc_prune_mtx;
taskq_t *arc_prune_taskq;
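/*
 * Ghost states hold only headers: the data has already been evicted, so
 * the code below asserts b_pabd == NULL and a bufcnt of zero for them.
 * arc_l2c_only is treated as a ghost state here as well, since such
 * headers carry no L1 data either.
 */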
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
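/*
 * These sizes assume the member order within arc_buf_hdr_t: the fields
 * shared with L2-only headers come first, followed by b_l1hdr and then
 * b_crypt_hdr, so truncating the struct at b_l1hdr or b_crypt_hdr yields
 * the smaller header variants.
 */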
/*
* Hash table routines
*/
#define BUF_LOCKS 2048
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
kmutex_t ht_locks[BUF_LOCKS] ____cacheline_aligned;
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK(idx) (&buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
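/*
 * Hash chains are protected by a striped set of BUF_LOCKS (2048) mutexes
 * rather than one lock per bucket: BUF_HASH_LOCK() masks the index with
 * BUF_LOCKS-1, so all buckets sharing the low 11 bits of their index
 * share a lock.
 */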
uint64_t zfs_crc64_table[256];
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 2 /* num of writes */
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
* and each of these states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
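/*
 * The four feed types are the combinations of {MRU, MFU} x
 * {data, metadata}: MRU data, MRU metadata, MFU data and MFU metadata.
 */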
/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
int l2arc_meta_percent = 33; /* limit on headers size */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
typedef enum arc_ovf_level {
ARC_OVF_NONE, /* ARC within target size. */
ARC_OVF_SOME, /* ARC is slightly overflowed. */
ARC_OVF_SEVERE /* ARC is severely overflowed. */
} arc_ovf_level_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static kmutex_t l2arc_rebuild_thr_lock;
static kcondvar_t l2arc_rebuild_thr_cv;
enum arc_hdr_alloc_flags {
ARC_HDR_ALLOC_RDATA = 0x1,
ARC_HDR_DO_ADAPT = 0x2,
ARC_HDR_USE_RESERVE = 0x4,
};
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, int);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, int);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, int);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static void arc_buf_watch(arc_buf_t *);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
static void l2arc_do_free_on_write(void);
static void l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only);
#define l2arc_hdr_arcstats_increment(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_FALSE)
#define l2arc_hdr_arcstats_decrement(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_FALSE)
#define l2arc_hdr_arcstats_increment_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_TRUE, B_TRUE)
#define l2arc_hdr_arcstats_decrement_state(hdr) \
l2arc_hdr_arcstats_update((hdr), B_FALSE, B_TRUE)
/*
* l2arc_mfuonly : A ZFS module parameter that controls whether only MFU
* metadata and data are cached from ARC into L2ARC.
*/
int l2arc_mfuonly = 0;
/*
* L2ARC TRIM
* l2arc_trim_ahead : A ZFS module parameter that controls how much ahead of
* the current write size (l2arc_write_max) we should TRIM if we
* have filled the device. It is defined as a percentage of the
* write size. If set to 100 we trim twice the space required to
* accommodate upcoming writes. A minimum of 64MB will be trimmed.
* It also enables TRIM of the whole L2ARC device upon creation or
* addition to an existing pool or if the header of the device is
* invalid upon importing a pool or onlining a cache device. The
* default is 0, which disables TRIM on L2ARC altogether as it can
* put significant stress on the underlying storage devices. This
* will vary depending on how well the specific device handles
* these commands.
*/
unsigned long l2arc_trim_ahead = 0;
/*
* Performance tuning of L2ARC persistence:
*
* l2arc_rebuild_enabled : A ZFS module parameter that controls whether adding
* an L2ARC device (either at pool import or later) will attempt
* to rebuild L2ARC buffer contents.
* l2arc_rebuild_blocks_min_l2size : A ZFS module parameter that controls
* whether log blocks are written to the L2ARC device. If the L2ARC
* device is less than 1GB, the amount of data l2arc_evict()
* evicts is significant compared to the amount of restored L2ARC
* data. In this case do not write log blocks in L2ARC in order
* not to waste space.
*/
int l2arc_rebuild_enabled = B_TRUE;
unsigned long l2arc_rebuild_blocks_min_l2size = 1024 * 1024 * 1024;
/* L2ARC persistence rebuild control routines. */
void l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen);
static void l2arc_dev_rebuild_thread(void *arg);
static int l2arc_rebuild(l2arc_dev_t *dev);
/* L2ARC persistence read I/O routines. */
static int l2arc_dev_hdr_read(l2arc_dev_t *dev);
static int l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lp, const l2arc_log_blkptr_t *next_lp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io);
static zio_t *l2arc_log_blk_fetch(vdev_t *vd,
const l2arc_log_blkptr_t *lp, l2arc_log_blk_phys_t *lb);
static void l2arc_log_blk_fetch_abort(zio_t *zio);
/* L2ARC persistence block restoration routines. */
static void l2arc_log_blk_restore(l2arc_dev_t *dev,
const l2arc_log_blk_phys_t *lb, uint64_t lb_asize);
static void l2arc_hdr_restore(const l2arc_log_ent_phys_t *le,
l2arc_dev_t *dev);
/* L2ARC persistence write I/O routines. */
static void l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio,
l2arc_write_callback_t *cb);
/* L2ARC persistence auxiliary routines. */
boolean_t l2arc_log_blkptr_valid(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *lbp);
static boolean_t l2arc_log_blk_insert(l2arc_dev_t *dev,
const arc_buf_hdr_t *ab);
boolean_t l2arc_range_check_overlap(uint64_t bottom,
uint64_t top, uint64_t check);
static void l2arc_blk_fetch_done(zio_t *zio);
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev);
/*
* We use Cityhash for this. It's fast, and has good hash properties without
* requiring any large static buffers.
*/
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth));
}
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EMPTY_OR_LOCKED(hdr) \
(HDR_EMPTY(hdr) || MUTEX_HELD(HDR_LOCK(hdr)))
#define HDR_EQUAL(spa, dva, birth, hdr) \
((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
uint64_t he = atomic_inc_64_nv(
&arc_stats.arcstat_hash_elements.value.ui64);
ARCSTAT_MAX(arcstat_hash_elements_max, he);
return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
atomic_dec_64(&arc_stats.arcstat_hash_elements.value.ui64);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_full_crypt_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
int i;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel.
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
for (i = 0; i < BUF_LOCKS; i++)
mutex_destroy(BUF_HASH_LOCK(i));
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_full_crypt_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
/* ARGSUSED */
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
zfs_refcount_create(&hdr->b_l1hdr.b_refcnt);
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
/* ARGSUSED */
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
}
/* ARGSUSED */
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_t *buf = vbuf;
bzero(buf, sizeof (arc_buf_t));
mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
/* ARGSUSED */
static void
hdr_full_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
cv_destroy(&hdr->b_l1hdr.b_cv);
zfs_refcount_destroy(&hdr->b_l1hdr.b_refcnt);
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
/* ARGSUSED */
static void
hdr_full_crypt_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_dest(vbuf, unused);
arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
}
/* ARGSUSED */
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr __maybe_unused = vbuf;
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
arc_buf_t *buf = vbuf;
mutex_destroy(&buf->b_evict_lock);
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
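/*
 * For example (illustrative), on a system with 64 GiB of memory and the
 * default 8 KiB zfs_arc_average_blocksize, the loop above stops at
 * hsize = 2^23 (8M buckets), so the table of 8-byte pointers occupies
 * 64 MiB -- the "1MB per GB" noted above.
 */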
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, NULL, NULL, NULL, 0);
hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
NULL, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, NULL,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
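/*
 * Precompute the 256-entry reflected CRC-64 lookup table for
 * ZFS_CRC64_POLY (one entry per byte value, eight shift/xor steps each);
 * zfs_crc64_table is used elsewhere in ZFS (e.g. for ZAP hashing).
 */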
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++)
mutex_init(BUF_HASH_LOCK(i), NULL, MUTEX_DEFAULT, NULL);
}
#define ARC_MINTIME (hz>>4) /* 62 ms */
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
uint8_t
arc_get_complevel(arc_buf_t *buf)
{
return (buf->b_hdr->b_complevel);
}
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
IMPLY(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
ASSERT(hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY_OR_LOCKED(hdr));
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL || ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#endif
}
/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum.
*/
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf))
return;
ASSERT(HDR_HAS_L1HDR(buf->b_hdr));
arc_cksum_compute(buf);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
* must use this function to ensure that the flags are updated in a
* thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Holes and embedded blocks will always have a psize of 0, so we
* ignore the compression of the blkptr and mark them as
* uncompressed.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
if (zfs_flags & ZFS_DEBUG_MODIFY)
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
return (copied);
}
/*
* Allocates an ARC buf header that's in an evicted & L2-cached state.
* This is used during l2arc reconstruction to make empty ARC buffers
* which circumvent the regular disk->arc->l2arc path and instead come
* into being in the reverse order, i.e. l2arc->arc.
*/
static arc_buf_hdr_t *
arc_buf_alloc_l2only(size_t size, arc_buf_contents_t type, l2arc_dev_t *dev,
dva_t dva, uint64_t daddr, int32_t psize, uint64_t birth,
enum zio_compress compress, uint8_t complevel, boolean_t protected,
boolean_t prefetch, arc_state_type_t arcs_state)
{
arc_buf_hdr_t *hdr;
ASSERT(size != 0);
hdr = kmem_cache_alloc(hdr_l2only_cache, KM_SLEEP);
hdr->b_birth = birth;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L2HDR);
HDR_SET_LSIZE(hdr, size);
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
if (prefetch)
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
hdr->b_spa = spa_load_guid(dev->l2ad_vdev->vdev_spa);
hdr->b_dva = dva;
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_daddr = daddr;
hdr->b_l2hdr.b_arcs_state = arcs_state;
return (hdr);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
void *tmpbuf = NULL;
abd_t *abd = hdr->b_l1hdr.b_pabd;
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
tmpbuf = zio_buf_alloc(lsize);
abd = abd_get_from_buf(tmpbuf, lsize);
abd_take_ownership_of_buf(abd, B_TRUE);
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmpbuf, lsize, hdr->b_complevel);
ASSERT3U(csize, <=, psize);
abd_zero_off(abd, csize, psize - csize);
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret != ENOENT)
goto error;
if (tmpbuf != NULL)
abd_free(abd);
return (0);
error:
if (tmpbuf != NULL)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb)
{
int ret;
abd_t *cabd = NULL;
void *tmp = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
ret = spa_do_crypt_abd(B_FALSE, spa, zb, hdr->b_crypt_hdr.b_ot,
B_FALSE, bswap, hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_iv,
hdr->b_crypt_hdr.b_mac, HDR_GET_PSIZE(hdr), hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
* abd plaintext pointer for use. This involves authenticating protected
* data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
const zbookmark_phys_t *zb, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, zb->zb_objset);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, zb);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
hdr->b_crypt_hdr.b_ebufcnt -= 1;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !ARC_BUF_SHARED(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
* Adjust encrypted and authenticated headers to accommodate
* the request if needed. Dnode blocks (ARC_FILL_IN_PLACE) are
* allowed to fail decryption due to keys not being loaded
* without being marked as an IO error.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
zb, !!(flags & ARC_FILL_NOAUTH));
if (error == EACCES && (flags & ARC_FILL_IN_PLACE) != 0) {
return (error);
} else if (error != 0) {
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (error);
}
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_buf_untransform_in_place(buf, hash_lock);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
if (hdr_compressed == compressed) {
if (!arc_buf_is_shared(buf)) {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (arc_buf_is_shared(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
return (0);
} else {
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, buf->b_data,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr),
&hdr->b_complevel);
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %px, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
spa_log_error(spa, zb);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
return (ret);
}
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, void *tag)
{
arc_state_t *state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!HDR_EMPTY(hdr) && !MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
state = hdr->b_l1hdr.b_state;
if ((zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
(state != arc_anon)) {
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
multilist_remove(&state->arcs_list[arc_buf_type(hdr)],
hdr);
arc_evictable_space_decrement(hdr, state);
}
/* remove the prefetch flag if we get a reference */
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
* list making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
ASSERT(!GHOST_STATE(state));
/*
* arc_l2c_only counts as a ghost state so we don't need to explicitly
* check to prevent usage of the arc_l2c_only list.
*/
if (((cnt = zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
multilist_insert(&state->arcs_list[arc_buf_type(hdr)], hdr);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
arc_evictable_space_increment(hdr, state);
}
return (cnt);
}
/*
* Returns detailed information about a specific arc buffer. When the
* state_index argument is set the function will calculate the arc header
* list position for its arc state. Since this requires a linear traversal,
* callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = l1hdr->b_bufcnt;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = zfs_refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
kmutex_t *hash_lock)
{
arc_state_t *old_state;
int64_t refcnt;
uint32_t bufcnt;
boolean_t update_old, update_new;
arc_buf_contents_t buftype = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = zfs_refcount_count(&hdr->b_l1hdr.b_refcnt);
bufcnt = hdr->b_l1hdr.b_bufcnt;
update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
} else {
old_state = arc_l2c_only;
refcnt = 0;
bufcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT3P(new_state, !=, old_state);
ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
ASSERT(old_state != arc_anon || bufcnt <= 1);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_remove(&old_state->arcs_list[buftype], hdr);
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_old = B_TRUE;
}
arc_evictable_space_decrement(hdr, old_state);
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(&new_state->arcs_list[buftype], hdr);
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_new = B_TRUE;
}
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have a
* bufcnt of zero, and no arc buffer to use for
* the reference. As a result, we use the arc
* header pointer for the reference.
*/
(void) zfs_refcount_add_many(&new_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_add_many(
&new_state->arcs_size,
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) zfs_refcount_remove_many(&old_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_buf_size(buf),
buf);
}
ASSERT3U(bufcnt, ==, buffers);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, arc_hdr_size(hdr),
hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) zfs_refcount_remove_many(
&old_state->arcs_size, HDR_GET_PSIZE(hdr),
hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr)) {
hdr->b_l1hdr.b_state = new_state;
if (HDR_HAS_L2HDR(hdr) && new_state != arc_l2c_only) {
l2arc_hdr_arcstats_decrement_state(hdr);
hdr->b_l2hdr.b_arcs_state = new_state->arcs_state;
l2arc_hdr_arcstats_increment_state(hdr);
}
}
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
/*
* Note: this includes space wasted by all scatter ABD's, not
* just those allocated by the ARC. But the vast majority of
* scatter ABD's come from the ARC, because other users are
* very short-lived.
*/
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE)
aggsum_add(&arc_sums.arcstat_meta_used, space);
aggsum_add(&arc_sums.arcstat_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
aggsum_add(&arc_sums.arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
aggsum_add(&arc_sums.arcstat_l2_hdr_size, -space);
break;
case ARC_SPACE_ABD_CHUNK_WASTE:
ARCSTAT_INCR(arcstat_abd_chunk_waste_size, -space);
break;
}
if (type != ARC_SPACE_DATA && type != ARC_SPACE_ABD_CHUNK_WASTE) {
ASSERT(aggsum_compare(&arc_sums.arcstat_meta_used,
space) >= 0);
ARCSTAT_MAX(arcstat_meta_max,
aggsum_upper_bound(&arc_sums.arcstat_meta_used));
aggsum_add(&arc_sums.arcstat_meta_used, -space);
}
ASSERT(aggsum_compare(&arc_sums.arcstat_size, space) >= 0);
aggsum_add(&arc_sums.arcstat_size, -space);
}
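/*
 * Illustrative pairing sketch (not a call site in this file's flow; the
 * constant and type below are only examples): consumers are expected to
 * balance every arc_space_consume() with a matching arc_space_return()
 * over the allocation's lifetime, so that arcstat_size and
 * arcstat_meta_used net out, e.g. when a full header is constructed and
 * later destroyed:
 *
 *	arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
 *	...
 *	arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
 */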
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee, however it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is shareable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, const zbookmark_phys_t *zb,
void *tag, boolean_t encrypted, boolean_t compressed, boolean_t noauth,
boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its b_pabd with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware. It must be allocated via
* zio_[data_]buf_alloc(), not as a page, because we need to be able
* to abd_release_ownership_of_buf(), which isn't allowed on "linear
* page" buffers because the ABD code needs to handle freeing them
* specially.
*/
boolean_t can_share = arc_can_share(hdr, buf) &&
!HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(hdr->b_l1hdr.b_pabd) &&
!abd_is_linear_page(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
hdr->b_l1hdr.b_bufcnt += 1;
if (encrypted)
hdr->b_crypt_hdr.b_ebufcnt += 1;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
ASSERT3P(zb, !=, NULL);
return (arc_buf_fill(buf, spa, zb, flags));
}
return (0);
}
static char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type, complevel);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type,
complevel);
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) zfs_refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) zfs_refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
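/*
 * Illustrative loan lifecycle (hedged sketch; the real call sites live in
 * the DMU): a consumer borrows an anonymous buffer, fills it, and hands the
 * hold back with arc_return_buf(). The reverse direction also exists:
 * arc_loan_inuse_buf() detaches a buffer from its dbuf tag and puts it back
 * on loan under arc_onloan_tag.
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, B_FALSE, size);
 *	...fill buf->b_data...
 *	arc_return_buf(buf, tag);
 */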
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
zfs_refcount_transfer_ownership_many(&hdr->b_l1hdr.b_state->arcs_size,
arc_hdr_size(hdr), hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_free(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
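/*
 * For example (illustrative): with a buffer list A -> B -> C and buf == B,
 * the loop below unlinks B and leaves lastbuf pointing at C, the final
 * buffer remaining on the hdr's list.
 */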
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list, then update lastbuf and advance
* bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(HDR_EMPTY_OR_LOCKED(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (arc_buf_is_shared(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf)) {
hdr->b_crypt_hdr.b_ebufcnt -= 1;
/*
* If we have no more encrypted buffers and we've
* already gotten a copy of the decrypted data we can
* free b_rabd to save some space.
*/
if (hdr->b_crypt_hdr.b_ebufcnt == 0 &&
HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL &&
!HDR_IO_IN_PROGRESS(hdr)) {
arc_hdr_free_abd(hdr, B_TRUE);
}
}
}
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
VERIFY(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must setup a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, int alloc_flags)
{
uint64_t size;
boolean_t alloc_rdata = ((alloc_flags & ARC_HDR_ALLOC_RDATA) != 0);
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr,
alloc_flags);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
/*
* Allocate empty anonymous ARC header. The header will get its identity
* assigned and buffers attached later as part of read or write operations.
*
* In case of read, arc_read() assigns the header its identity (b_dva + b_birth),
* inserts it into ARC hash to become globally visible and allocates physical
* (b_pabd) or raw (b_rabd) ABD buffer to read into from disk. On disk read
* completion arc_read_done() allocates ARC buffer(s) as needed, potentially
* sharing one of them with the physical ABD buffer.
*
* In case of write, arc_alloc_buf() allocates an ARC buffer to be filled with
* data. Then after compression and/or encryption arc_write_ready() allocates
* and fills (or potentially shares) physical (b_pabd) or raw (b_rabd) ABD
* buffer. On disk write completion arc_write_done() assigns the header its
* new identity (b_dva + b_birth) and inserts into ARC hash.
*
* In case of partial overwrite, the old data is read first as described. Then
* arc_release() either allocates new anonymous ARC header and moves the ARC
* buffer to it, or reuses the old ARC header by discarding its identity and
* removing it from ARC hash. After buffer modification normal write process
* follows as described.
*/
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type, uint8_t complevel,
arc_buf_contents_t type)
{
arc_buf_hdr_t *hdr;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
if (protected) {
hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
} else {
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
}
ASSERT(HDR_EMPTY(hdr));
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
hdr->b_complevel = complevel;
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_buf = NULL;
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
/*
* if the caller wanted a new full header and the header is to be
* encrypted we will actually allocate the header from the full crypt
* cache instead. The same applies to freeing from the old cache.
*/
if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
new = hdr_full_crypt_cache;
if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
old = hdr_full_crypt_cache;
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set to NULL before freeing */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(), as such we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_pabd field
* might be accessed even though it has been removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function allows an L1 header to be reallocated as a crypt
* header and vice versa. If we are going to a crypt header, the
* new fields will be zeroed out.
*/
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
arc_buf_hdr_t *nhdr;
arc_buf_t *buf;
kmem_cache_t *ncache, *ocache;
/*
* This function requires that hdr is in the arc_anon state.
* Therefore it won't have any L2ARC data for us to worry
* about copying.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!list_link_active(&hdr->b_l2hdr.b_l2node));
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (need_crypt) {
ncache = hdr_full_crypt_cache;
ocache = hdr_full_cache;
} else {
ncache = hdr_full_cache;
ocache = hdr_full_crypt_cache;
}
nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
/*
* Copy all members that aren't locks or condvars to the new header.
* No lists are pointing to us (as we asserted above), so we don't
* need to worry about the list nodes.
*/
nhdr->b_dva = hdr->b_dva;
nhdr->b_birth = hdr->b_birth;
nhdr->b_type = hdr->b_type;
nhdr->b_flags = hdr->b_flags;
nhdr->b_psize = hdr->b_psize;
nhdr->b_lsize = hdr->b_lsize;
nhdr->b_spa = hdr->b_spa;
nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
/*
* This zfs_refcount_add() exists only to ensure that the individual
* arc buffers always point to a header that is referenced, avoiding
* a small race condition that could trigger ASSERTs.
*/
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
mutex_enter(&buf->b_evict_lock);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
}
zfs_refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
(void) zfs_refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
if (need_crypt) {
arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
} else {
arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
}
/* unset all members of the original hdr */
bzero(&hdr->b_dva, sizeof (dva_t));
hdr->b_birth = 0;
hdr->b_type = ARC_BUFC_INVALID;
hdr->b_flags = 0;
hdr->b_psize = 0;
hdr->b_lsize = 0;
hdr->b_spa = 0;
hdr->b_l1hdr.b_freeze_cksum = NULL;
hdr->b_l1hdr.b_buf = NULL;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_byteswap = 0;
hdr->b_l1hdr.b_state = NULL;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_acb = NULL;
hdr->b_l1hdr.b_pabd = NULL;
if (ocache == hdr_full_crypt_cache) {
ASSERT(!HDR_HAS_RABD(hdr));
hdr->b_crypt_hdr.b_ot = DMU_OT_NONE;
hdr->b_crypt_hdr.b_ebufcnt = 0;
hdr->b_crypt_hdr.b_dsobj = 0;
bzero(hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bzero(hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bzero(hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}
buf_discard_identity(hdr);
kmem_cache_free(ocache, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
if (!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, 0, type);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
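/*
 * Typical usage sketch (illustrative only; "tag" is whatever pointer the
 * caller uses to identify its hold): allocate a thawed buffer, fill
 * b_data, and drop the hold with arc_buf_destroy() when finished:
 *
 *	arc_buf_t *buf = arc_alloc_buf(spa, tag, ARC_BUFC_DATA, size);
 *	...use buf->b_data...
 *	arc_buf_destroy(buf, tag);
 */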
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, complevel, ARC_BUFC_DATA);
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to disk,
* it's easiest if we just set up sharing between the buf and the hdr.
*/
arc_share_buf(hdr, buf);
return (buf);
}
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type, uint8_t complevel)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, complevel, type);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, NULL, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
return (buf);
}
static void
l2arc_hdr_arcstats_update(arc_buf_hdr_t *hdr, boolean_t incr,
boolean_t state_only)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
arc_buf_contents_t type = hdr->b_type;
int64_t lsize_s;
int64_t psize_s;
int64_t asize_s;
if (incr) {
lsize_s = lsize;
psize_s = psize;
asize_s = asize;
} else {
lsize_s = -lsize;
psize_s = -psize;
asize_s = -asize;
}
/* If the buffer is a prefetch, count it as such. */
if (HDR_PREFETCH(hdr)) {
ARCSTAT_INCR(arcstat_l2_prefetch_asize, asize_s);
} else {
/*
* We use the value stored in the L2 header upon initial
* caching in L2ARC. This value will be updated in case
* an MRU/MRU_ghost buffer transitions to MFU but the L2ARC
* metadata (log entry) cannot currently be updated. Having
* the ARC state in the L2 header solves the problem of a
* possibly absent L1 header (apparent in buffers restored
* from persistent L2ARC).
*/
switch (hdr->b_l2hdr.b_arcs_state) {
case ARC_STATE_MRU_GHOST:
case ARC_STATE_MRU:
ARCSTAT_INCR(arcstat_l2_mru_asize, asize_s);
break;
case ARC_STATE_MFU_GHOST:
case ARC_STATE_MFU:
ARCSTAT_INCR(arcstat_l2_mfu_asize, asize_s);
break;
default:
break;
}
}
if (state_only)
return;
ARCSTAT_INCR(arcstat_l2_psize, psize_s);
ARCSTAT_INCR(arcstat_l2_lsize, lsize_s);
switch (type) {
case ARC_BUFC_DATA:
ARCSTAT_INCR(arcstat_l2_bufc_data_asize, asize_s);
break;
case ARC_BUFC_METADATA:
ARCSTAT_INCR(arcstat_l2_bufc_metadata_asize, asize_s);
break;
default:
break;
}
}
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
l2arc_hdr_arcstats_decrement(hdr);
vdev_space_update(dev->l2ad_vdev, -asize, 0, 0);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr),
hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_buf == NULL ||
hdr->b_l1hdr.b_bufcnt > 0);
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
/*
* The header's identity can only be safely discarded once it is no
* longer discoverable. This requires removing it from the hash table
* and the l2arc header list. After this point the hash lock can not
* be used to protect the header.
*/
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (!HDR_PROTECTED(hdr)) {
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_full_crypt_cache, hdr);
}
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
void
arc_buf_destroy(arc_buf_t *buf, void* tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, NULL, tag));
arc_hdr_destroy(hdr);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
(void) remove_reference(hdr, hash_lock, tag);
arc_buf_destroy_impl(buf);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
*
* Return total size of evicted data buffers for eviction progress tracking.
* When evicting from ghost states return logical buffer size to make eviction
* progress at the same (or at least comparable) rate as from non-ghost states.
*
* Return *real_evicted for actual ARC size reduction to wake up threads
* waiting for it. For non-ghost states it includes size of evicted data
* buffers (the headers are not freed there). For ghost states it includes
* only the evicted headers size.
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, uint64_t *real_evicted)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
*real_evicted = 0;
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr, hash_lock);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
hdr = arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
*real_evicted += HDR_FULL_SIZE - HDR_L2ONLY_SIZE;
} else {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
*real_evicted += HDR_FULL_SIZE;
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu);
evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
/* prefetch buffers have a minimum lifespan */
if (HDR_IO_IN_PROGRESS(hdr) ||
((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime))) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
ASSERT0(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt));
while (hdr->b_l1hdr.b_buf) {
arc_buf_t *buf = hdr->b_l1hdr.b_buf;
if (!mutex_tryenter(&buf->b_evict_lock)) {
ARCSTAT_BUMP(arcstat_mutex_miss);
break;
}
if (buf->b_data != NULL) {
bytes_evicted += HDR_GET_LSIZE(hdr);
*real_evicted += HDR_GET_LSIZE(hdr);
}
mutex_exit(&buf->b_evict_lock);
arc_buf_destroy_impl(buf);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
switch (state->arcs_state) {
case ARC_STATE_MRU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mru,
HDR_GET_LSIZE(hdr));
break;
case ARC_STATE_MFU:
ARCSTAT_INCR(
arcstat_evict_l2_eligible_mfu,
HDR_GET_LSIZE(hdr));
break;
default:
break;
}
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
if (hdr->b_l1hdr.b_bufcnt == 0) {
arc_cksum_free(hdr);
bytes_evicted += arc_hdr_size(hdr);
*real_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed
* buffer then we discard it here before we change states.
* This ensures that the accounting is updated correctly
* in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr, hash_lock);
ASSERT(HDR_IN_HASH_TABLE(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
}
return (bytes_evicted);
}
static void
arc_set_need_free(void)
{
ASSERT(MUTEX_HELD(&arc_evict_lock));
int64_t remaining = arc_free_memory() - arc_sys_free / 2;
arc_evict_waiter_t *aw = list_tail(&arc_evict_waiters);
if (aw == NULL) {
arc_need_free = MAX(-remaining, 0);
} else {
arc_need_free =
MAX(-remaining, (int64_t)(aw->aew_count - arc_evict_count));
}
}
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, uint64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0, real_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
int evict_count = zfs_arc_evict_batch_limit;
ASSERT3P(marker, !=, NULL);
mls = multilist_sublist_lock(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); likely(hdr != NULL);
hdr = multilist_sublist_prev(mls, marker)) {
if ((evict_count <= 0) || (bytes_evicted >= bytes))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero, is the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t revicted;
uint64_t evicted = arc_evict_hdr(hdr, hash_lock,
&revicted);
mutex_exit(hash_lock);
bytes_evicted += evicted;
real_evicted += revicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count--;
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
/*
* Increment the count of evicted bytes, and wake up any threads that
* are waiting for the count to reach this value. Since the list is
* ordered by ascending aew_count, we pop off the beginning of the
* list until we reach the end, or a waiter that's past the current
* "count". Doing this outside the loop reduces the number of times
* we need to acquire the global arc_evict_lock.
*
* Only wake when there's sufficient free memory in the system
* (specifically, arc_sys_free/2, which by default is a bit more than
* 1/64th of RAM). See the comments in arc_wait_for_eviction().
*/
mutex_enter(&arc_evict_lock);
arc_evict_count += real_evicted;
if (arc_free_memory() > arc_sys_free / 2) {
arc_evict_waiter_t *aw;
while ((aw = list_head(&arc_evict_waiters)) != NULL &&
aw->aew_count <= arc_evict_count) {
list_remove(&arc_evict_waiters, aw);
cv_broadcast(&aw->aew_cv);
}
}
arc_set_need_free();
mutex_exit(&arc_evict_lock);
/*
* If the ARC size is reduced from arc_c_max to arc_c_min (especially
* if the average cached block is small), eviction can be on-CPU for
* many seconds. To ensure that other threads that may be bound to
* this CPU are able to make progress, make a voluntary preemption
* call here.
*/
cond_resched();
return (bytes_evicted);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so, may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; which is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, uint64_t bytes,
arc_buf_contents_t type)
{
uint64_t total_evicted = 0;
multilist_t *ml = &state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_evict_type() and
* arc_evict_state_impl().
*/
markers[i]->b_spa = 0;
mls = multilist_sublist_lock(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
while (total_evicted < bytes) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Try to reduce pinned dnodes with a floor of arc_dnode_limit.
* Request that 10% of the LRUs be scanned by the superblock
* shrinker.
*/
if (type == ARC_BUFC_DATA && aggsum_compare(
&arc_sums.arcstat_dnode_size, arc_dnode_size_limit) > 0) {
arc_prune_async((aggsum_upper_bound(
&arc_sums.arcstat_dnode_size) -
arc_dnode_size_limit) / sizeof (dnode_t) /
zfs_arc_dnode_reduce_percent);
}
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
kmem_cache_free(hdr_full_cache, markers[i]);
}
kmem_free(markers, sizeof (*markers) * num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (zfs_refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
if (!retry)
break;
}
return (evicted);
}
/*
* Evict the specified number of bytes from the state specified,
* restricting eviction to the spa and type given. This function
* prevents us from trying to evict more from a state's list than
* is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_evict_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
arc_buf_contents_t type)
{
uint64_t delta;
if (bytes > 0 && zfs_refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&state->arcs_esize[type]),
bytes);
return (arc_evict_state(state, spa, delta, type));
}
return (0);
}
/*
* The goal of this function is to evict enough meta data buffers from the
* ARC in order to enforce the arc_meta_limit. Achieving this is slightly
* more complicated than it appears because it is common for data buffers
* to have holds on meta data buffers. In addition, dnode meta data buffers
* will be held by the dnodes in the block preventing them from being freed.
* This means we can't simply traverse the ARC and expect to always find
* enough unheld meta data buffer to release.
*
* Therefore, this function has been updated to make alternating passes
* over the ARC releasing data buffers and then newly unheld meta data
* buffers. This ensures forward progress is maintained and meta_used
* will decrease. Normally this is sufficient, but if required the ARC
* will call the registered prune callbacks causing dentry and inodes to
* be dropped from the VFS cache. This will make dnode meta data buffers
* available for reclaim.
*/
static uint64_t
arc_evict_meta_balanced(uint64_t meta_used)
{
int64_t delta, prune = 0, adjustmnt;
uint64_t total_evicted = 0;
arc_buf_contents_t type = ARC_BUFC_DATA;
int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
restart:
/*
* This slightly differs from the way we evict from the mru in
* arc_evict because we don't have a "target" value (i.e. no
* "meta" arc_p). As a result, I think we can completely
* cannibalize the metadata in the MRU before we evict the
* metadata from the MFU. I think we probably need to implement a
* "metadata arc_p" value to do this properly.
*/
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mru->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mru, 0, delta, type);
adjustmnt -= delta;
}
/*
* We can't afford to recalculate adjustmnt here. If we do,
* new metadata buffers can sneak into the MRU or ANON lists,
* thus penalizing the MFU metadata. Although the fudge factor is
* small, it has been empirically shown to be significant for
* certain workloads (e.g. creating many empty directories). As
* such, we use the original calculation for adjustmnt, and
* simply decrement the amount of data evicted from the MRU.
*/
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
delta = MIN(zfs_refcount_count(&arc_mfu->arcs_esize[type]),
adjustmnt);
total_evicted += arc_evict_impl(arc_mfu, 0, delta, type);
}
adjustmnt = meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mru_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mru_ghost, 0, delta, type);
adjustmnt -= delta;
}
if (adjustmnt > 0 &&
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
zfs_refcount_count(&arc_mfu_ghost->arcs_esize[type]));
total_evicted += arc_evict_impl(arc_mfu_ghost, 0, delta, type);
}
/*
* If after attempting to make the requested adjustment to the ARC
* the meta limit is still being exceeded then request that the
* higher layers drop some cached objects which have holds on ARC
* meta buffers. Requests to the upper layers will be made with
* increasingly large scan sizes until the ARC is below the limit.
*/
if (meta_used > arc_meta_limit) {
if (type == ARC_BUFC_DATA) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
if (zfs_arc_meta_prune) {
prune += zfs_arc_meta_prune;
arc_prune_async(prune);
}
}
if (restarts > 0) {
restarts--;
goto restart;
}
}
return (total_evicted);
}
/*
* Evict metadata buffers from the cache, such that arcstat_meta_used is
* capped by the arc_meta_limit tunable.
*/
static uint64_t
arc_evict_meta_only(uint64_t meta_used)
{
uint64_t total_evicted = 0;
int64_t target;
/*
* If we're over the meta limit, we want to evict enough
* metadata to get back under the meta limit. We don't want to
* evict so much that we drop the MRU below arc_p, though. If
* we're over the meta limit more than we're over arc_p, we
* evict some from the MRU here, and some from the MFU below.
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) - arc_p));
total_evicted += arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
/*
* Similar to the above, we want to evict enough bytes to get us
* below the meta limit, but not so much as to drop us below the
* space allotted to the MFU (which is defined as arc_c - arc_p).
*/
target = MIN((int64_t)(meta_used - arc_meta_limit),
(int64_t)(zfs_refcount_count(&arc_mfu->arcs_size) -
(arc_c - arc_p)));
total_evicted += arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
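/*
 * Illustrative example of the two targets above (hypothetical sizes):
 * with arc_meta_limit = 4 GiB, meta_used = 5 GiB, arc_p = 8 GiB,
 * arc_c = 16 GiB, anon + mru = 10 GiB and mfu = 5 GiB, the MRU pass
 * uses target = MIN(5G - 4G, 10G - 8G) = 1 GiB, while the MFU pass
 * computes target = MIN(5G - 4G, 5G - (16G - 8G)), which is negative,
 * so arc_evict_impl() skips the MFU entirely.
 */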
static uint64_t
arc_evict_meta(uint64_t meta_used)
{
if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY)
return (arc_evict_meta_only(meta_used));
else
return (arc_evict_meta_balanced(meta_used));
}
/*
* Return the type of the oldest buffer in the given arc state
*
* This function will select a random sublist of type ARC_BUFC_DATA and
* a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
* is compared, and the type which contains the "older" buffer will be
* returned.
*/
static arc_buf_contents_t
arc_evict_type(arc_state_t *state)
{
multilist_t *data_ml = &state->arcs_list[ARC_BUFC_DATA];
multilist_t *meta_ml = &state->arcs_list[ARC_BUFC_METADATA];
int data_idx = multilist_get_random_index(data_ml);
int meta_idx = multilist_get_random_index(meta_ml);
multilist_sublist_t *data_mls;
multilist_sublist_t *meta_mls;
arc_buf_contents_t type;
arc_buf_hdr_t *data_hdr;
arc_buf_hdr_t *meta_hdr;
/*
* We keep the sublist lock until we're finished, to prevent
* the headers from being destroyed via arc_evict_state().
*/
data_mls = multilist_sublist_lock(data_ml, data_idx);
meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
/*
* These two loops are to ensure we skip any markers that
* might be at the tail of the lists due to arc_evict_state().
*/
for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
if (data_hdr->b_spa != 0)
break;
}
for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
if (meta_hdr->b_spa != 0)
break;
}
if (data_hdr == NULL && meta_hdr == NULL) {
type = ARC_BUFC_DATA;
} else if (data_hdr == NULL) {
ASSERT3P(meta_hdr, !=, NULL);
type = ARC_BUFC_METADATA;
} else if (meta_hdr == NULL) {
ASSERT3P(data_hdr, !=, NULL);
type = ARC_BUFC_DATA;
} else {
ASSERT3P(data_hdr, !=, NULL);
ASSERT3P(meta_hdr, !=, NULL);
/* The headers can't be on the sublist without an L1 header */
ASSERT(HDR_HAS_L1HDR(data_hdr));
ASSERT(HDR_HAS_L1HDR(meta_hdr));
if (data_hdr->b_l1hdr.b_arc_access <
meta_hdr->b_l1hdr.b_arc_access) {
type = ARC_BUFC_DATA;
} else {
type = ARC_BUFC_METADATA;
}
}
multilist_sublist_unlock(meta_mls);
multilist_sublist_unlock(data_mls);
return (type);
}
/*
* Evict buffers from the cache, such that arcstat_size is capped by arc_c.
*/
static uint64_t
arc_evict(void)
{
uint64_t total_evicted = 0;
uint64_t bytes;
int64_t target;
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
uint64_t ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* If we're over arc_meta_limit, we want to correct that before
* potentially evicting data buffers below.
*/
total_evicted += arc_evict_meta(ameta);
/*
* Adjust MRU size
*
* If we're over the target cache size, we want to evict enough
* from the list to get back to our target size. We don't want
* to evict too much from the MRU, such that it drops below
* arc_p. So, if we're over our target cache size more than
* the MRU is over arc_p, we'll evict enough to get back to
* arc_p here, and then evict more from the MFU below.
*/
target = MIN((int64_t)(asize - arc_c),
(int64_t)(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) + ameta - arc_p));
/*
* If we're below arc_meta_min, always prefer to evict data.
* Otherwise, try to satisfy the requested number of bytes to
* evict from the type which contains older buffers; in an
* effort to keep newer buffers in the cache regardless of their
* type. If we cannot satisfy the number of bytes from this
* type, spill over into the next type.
*/
if (arc_evict_type(arc_mru) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mru, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
}
/*
* Re-sum ARC stats after the first round of evictions.
*/
asize = aggsum_value(&arc_sums.arcstat_size);
ameta = aggsum_value(&arc_sums.arcstat_meta_used);
/*
* Adjust MFU size
*
* Now that we've tried to evict enough from the MRU to get its
* size back to arc_p, if we're still above the target cache
* size, we evict the rest from the MFU.
*/
target = asize - arc_c;
if (arc_evict_type(arc_mfu) == ARC_BUFC_METADATA &&
ameta > arc_meta_min) {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
}
/*
* Adjust ghost lists
*
* In addition to the above, the ARC also defines target values
* for the ghost lists. The sum of the mru list and mru ghost
* list should never exceed the target size of the cache, and
* the sum of the mru list, mfu list, mru ghost list, and mfu
* ghost list should never exceed twice the target size of the
* cache. The following logic enforces these limits on the ghost
* caches, and evicts from them as needed.
*/
target = zfs_refcount_count(&arc_mru->arcs_size) +
zfs_refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
/*
* We assume the sum of the mru list and mfu list is less than
* or equal to arc_c (we enforced this above), which means we
* can use the simpler of the two equations below:
*
* mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
* mru ghost + mfu ghost <= arc_c
*/
target = zfs_refcount_count(&arc_mru_ghost->arcs_size) +
zfs_refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
bytes = arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_evict_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
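/*
 * Illustrative example of the ghost list targets above (hypothetical
 * sizes): with arc_c = 16 GiB, mru = 10 GiB and mru_ghost = 9 GiB, the
 * first ghost target is 10G + 9G - 16G = 3 GiB evicted from
 * arc_mru_ghost. If mru_ghost then holds 6 GiB and mfu_ghost holds
 * 12 GiB, the second target is 6G + 12G - 16G = 2 GiB evicted from
 * arc_mfu_ghost, keeping mru + mru_ghost <= arc_c and
 * mru_ghost + mfu_ghost <= arc_c.
 */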
void
arc_flush(spa_t *spa, boolean_t retry)
{
uint64_t guid = 0;
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == 0);
if (spa != NULL)
guid = spa_load_guid(spa);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}
void
arc_reduce_target_size(int64_t to_free)
{
uint64_t asize = aggsum_value(&arc_sums.arcstat_size);
/*
* All callers want the ARC to actually evict (at least) this much
* memory. Therefore we reduce from the lower of the current size and
* the target size. This way, even if arc_c is much higher than
* arc_size (as can be the case after many calls to arc_freed()), we will
* immediately have arc_c < arc_size and therefore the arc_evict_zthr
* will evict.
*/
uint64_t c = MIN(arc_c, asize);
if (c > to_free && c - to_free > arc_c_min) {
arc_c = c - to_free;
atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
if (arc_p > arc_c)
arc_p = (arc_c >> 1);
ASSERT(arc_c >= arc_c_min);
ASSERT((int64_t)arc_p >= 0);
} else {
arc_c = arc_c_min;
}
if (asize > arc_c) {
/* See comment in arc_evict_cb_check() on why lock+flag */
mutex_enter(&arc_evict_lock);
arc_evict_needed = B_TRUE;
mutex_exit(&arc_evict_lock);
zthr_wakeup(arc_evict_zthr);
}
}
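/*
 * Illustrative example (hypothetical values): with arc_c = 16 GiB,
 * arcstat_size = 10 GiB and to_free = 1 GiB, we reduce from
 * c = MIN(16G, 10G) = 10 GiB, so arc_c becomes 9 GiB. Since the actual
 * size (10 GiB) now exceeds arc_c, arc_evict_needed is set and the
 * eviction thread is woken to evict roughly the requested 1 GiB.
 */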
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
void
arc_kmem_reap_soon(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
extern kmem_cache_t *zio_buf_cache[];
extern kmem_cache_t *zio_data_buf_cache[];
#ifdef _KERNEL
if ((aggsum_compare(&arc_sums.arcstat_meta_used,
arc_meta_limit) >= 0) && zfs_arc_meta_prune) {
/*
* We are exceeding our meta-data cache limit.
* Prune some entries to release holds on meta-data.
*/
arc_prune_async(zfs_arc_meta_prune);
}
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* reach upper limit of cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(zfs_btree_leaf_cache);
abd_cache_reap_now();
}
/* ARGSUSED */
static boolean_t
arc_evict_cb_check(void *arg, zthr_t *zthr)
{
#ifdef ZFS_DEBUG
/*
* This is necessary in order to keep the kstat information
* up to date for tools that display kstat data such as the
* mdb ::arc dcmd and the Linux crash utility. These tools
* typically do not call kstat's update function, but simply
* dump out stats from the most recent update. Without
* this call, these commands may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this call, the data might be out of date if the
* evict thread hasn't been woken recently; but that should
* suffice. The arc_state_t structures can be queried
* directly if more accurate information is needed.
*/
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
/*
* We have to rely on arc_wait_for_eviction() to tell us when to
* evict, rather than checking if we are overflowing here, so that we
* are sure to not leave arc_wait_for_eviction() waiting on aew_cv.
* If we have become "not overflowing" since arc_wait_for_eviction()
* checked, we need to wake it up. We could broadcast the CV here,
* but arc_wait_for_eviction() may have not yet gone to sleep. We
* would need to use a mutex to ensure that this function doesn't
* broadcast until arc_wait_for_eviction() has gone to sleep (e.g.
* the arc_evict_lock). However, the lock ordering of such a lock
* would necessarily be incorrect with respect to the zthr_lock,
* which is held before this function is called, and is held by
* arc_wait_for_eviction() when it calls zthr_wakeup().
*/
return (arc_evict_needed);
}
/*
* Keep arc_size under arc_c by running arc_evict which evicts data
* from the ARC.
*/
/* ARGSUSED */
static void
arc_evict_cb(void *arg, zthr_t *zthr)
{
uint64_t evicted = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
/* Evict from cache */
evicted = arc_evict();
/*
* If evicted is zero, we couldn't evict anything
* via arc_evict(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop. Additionally, zthr_iscancelled() is
* checked here so that if the arc is shutting down, the
* broadcast will wake any remaining arc evict waiters.
*/
mutex_enter(&arc_evict_lock);
arc_evict_needed = !zthr_iscancelled(arc_evict_zthr) &&
evicted > 0 && aggsum_compare(&arc_sums.arcstat_size, arc_c) > 0;
if (!arc_evict_needed) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* arc_get_data_impl() sooner.
*/
arc_evict_waiter_t *aw;
while ((aw = list_remove_head(&arc_evict_waiters)) != NULL) {
cv_broadcast(&aw->aew_cv);
}
arc_set_need_free();
}
mutex_exit(&arc_evict_lock);
spl_fstrans_unmark(cookie);
}
/* ARGSUSED */
static boolean_t
arc_reap_cb_check(void *arg, zthr_t *zthr)
{
int64_t free_memory = arc_available_memory();
static int reap_cb_check_counter = 0;
/*
* If a kmem reap is already active, don't schedule more. We must
* check for this because kmem_cache_reap_soon() won't actually
* block on the cache being reaped (this is to prevent callers from
* becoming implicitly blocked by a system-wide kmem reap -- which,
* on a system with many, many full magazines, can take minutes).
*/
if (!kmem_cache_reap_active() && free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
return (B_TRUE);
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= arc_growtime) {
arc_no_grow = B_FALSE;
}
/*
* Called unconditionally every 60 seconds to reclaim unused
* zstd compression and decompression context. This is done
* here to avoid the need for an independent thread.
*/
if (!((reap_cb_check_counter++) % 60))
zfs_zstd_cache_reap_now();
return (B_FALSE);
}
/*
* Keep enough free memory in the system by reaping the ARC's kmem
* caches. To cause more slabs to be reapable, we may reduce the
* target size of the cache (arc_c), causing the arc_evict_cb()
* to free more buffers.
*/
/* ARGSUSED */
static void
arc_reap_cb(void *arg, zthr_t *zthr)
{
int64_t free_memory;
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* Kick off asynchronous kmem_reap()'s of all our caches.
*/
arc_kmem_reap_soon();
/*
* Wait at least arc_kmem_cache_reap_retry_ms between
* arc_kmem_reap_soon() calls. Without this check it is possible to
* end up in a situation where we spend lots of time reaping
* caches, while we're near arc_c_min. Waiting here also gives the
* subsequent free memory check a chance of finding that the
* asynchronous reap has already freed enough memory, and we don't
* need to call arc_reduce_target_size().
*/
delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000);
/*
* Reduce the target size as needed to maintain the amount of free
* memory in the system at a fraction of the arc_size (1/128th by
* default). If oversubscribed (free_memory < 0) then reduce the
* target arc_size by the deficit amount plus the fractional
* amount. If free memory is positive but less than the fractional
* amount, reduce by what is needed to hit the fractional amount.
*/
free_memory = arc_available_memory();
int64_t to_free =
(arc_c >> arc_shrink_shift) - free_memory;
if (to_free > 0) {
arc_reduce_target_size(to_free);
}
spl_fstrans_unmark(cookie);
}
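/*
 * Illustrative example of the to_free computation above (hypothetical
 * values, default arc_shrink_shift of 7, i.e. 1/128th): with
 * arc_c = 16 GiB the fractional amount is 16G >> 7 = 128 MiB. If
 * arc_available_memory() reports 32 MiB free, to_free = 128M - 32M =
 * 96 MiB; if it reports -64 MiB (oversubscribed), to_free = 128M + 64M
 * = 192 MiB.
 */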
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal to arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
#endif /* _KERNEL */
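/*
 * Illustrative example of the cases above (hypothetical values): with
 * arc_c_min = 4 GiB, arc_size = 10 GiB and 1 GiB of dirty/anon data,
 * case 2.1 applies and at most 10G - 4G = 6 GiB of the clean mru/mfu
 * data may be reported as evictable, even if more of it is clean.
 * Were arc_size only 3 GiB, case 2.2 would apply and none of it would
 * be reported as evictable.
 */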
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
arc_adapt(int bytes, arc_state_t *state)
{
int mult;
uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
int64_t mrug_size = zfs_refcount_count(&arc_mru_ghost->arcs_size);
int64_t mfug_size = zfs_refcount_count(&arc_mfu_ghost->arcs_size);
ASSERT(bytes > 0);
/*
* Adapt the target size of the MRU list:
* - if we just hit in the MRU ghost list, then increase
* the target size of the MRU list.
* - if we just hit in the MFU ghost list, then increase
* the target size of the MFU list by decreasing the
* target size of the MRU list.
*/
if (state == arc_mru_ghost) {
mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
} else if (state == arc_mfu_ghost) {
uint64_t delta;
mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10);
delta = MIN(bytes * mult, arc_p);
arc_p = MAX(arc_p_min, arc_p - delta);
}
ASSERT((int64_t)arc_p >= 0);
/*
* Wake reap thread if we do not have any available memory
*/
if (arc_reclaim_needed()) {
zthr_wakeup(arc_reap_zthr);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
if (aggsum_upper_bound(&arc_sums.arcstat_size) >=
arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
atomic_add_64(&arc_c, (int64_t)bytes);
if (arc_c > arc_c_max)
arc_c = arc_c_max;
else if (state == arc_anon)
atomic_add_64(&arc_p, (int64_t)bytes);
if (arc_p > arc_c)
arc_p = arc_c;
}
ASSERT((int64_t)arc_p >= 0);
}
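/*
 * Illustrative example of the ghost-hit adaptation above (hypothetical
 * sizes): with mru_ghost = 1 GiB and mfu_ghost = 4 GiB, a hit in the
 * MRU ghost list yields mult = 4G / 1G = 4, so arc_p grows by
 * 4 * bytes (capped at arc_c - arc_p_min). A hit in the MFU ghost list
 * instead yields mult = 1 and shrinks arc_p by bytes, favoring the MFU.
 */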
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static arc_ovf_level_t
arc_is_overflowing(boolean_t use_reserve)
{
/* Always allow at least one block of overflow */
int64_t overflow = MAX(SPA_MAXBLOCKSIZE,
arc_c >> zfs_arc_overflow_shift);
/*
* We just compare the lower bound here for performance reasons. Our
* primary goals are to make sure that the arc never grows without
* bound, and that it can reach its maximum size. This check
* accomplishes both goals. The maximum amount we could run over by is
* 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block
* in the ARC. In practice, that's in the tens of MB, which is low
* enough to be safe.
*/
int64_t over = aggsum_lower_bound(&arc_sums.arcstat_size) -
arc_c - overflow / 2;
if (!use_reserve)
overflow /= 2;
return (over < 0 ? ARC_OVF_NONE :
over < overflow ? ARC_OVF_SOME : ARC_OVF_SEVERE);
}
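/*
 * Illustrative thresholds for the check above (hypothetical arc_c,
 * assuming the default zfs_arc_overflow_shift of 8): with arc_c = 8 GiB
 * the overflow allowance is MAX(16M, 8G >> 8) = 32 MiB. Sizes up to
 * arc_c + 16 MiB report ARC_OVF_NONE; without the reserve, sizes up to
 * roughly arc_c + 32 MiB report ARC_OVF_SOME and anything beyond that
 * ARC_OVF_SEVERE.
 */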
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
int alloc_flags)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, alloc_flags);
if (type == ARC_BUFC_METADATA) {
return (abd_alloc(size, B_TRUE));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (abd_alloc(size, B_FALSE));
}
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag, ARC_HDR_DO_ADAPT);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Wait for the specified amount of data (in bytes) to be evicted from the
* ARC, and for there to be sufficient free memory in the system. Waiting for
* eviction ensures that the memory used by the ARC decreases. Waiting for
* free memory ensures that the system won't run out of free pages, regardless
* of ARC behavior and settings. See arc_lowmem_init().
*/
void
arc_wait_for_eviction(uint64_t amount, boolean_t use_reserve)
{
switch (arc_is_overflowing(use_reserve)) {
case ARC_OVF_NONE:
return;
case ARC_OVF_SOME:
/*
* This is a bit racy without taking arc_evict_lock, but the
* worst that can happen is we either call zthr_wakeup() an
* extra time due to a race with another thread here, or the
* flag we just set gets cleared by arc_evict_cb(), which is
* unlikely due to the big hysteresis, but also not important
* since at this level of overflow the eviction is purely
* advisory. At the same time, taking the global lock here on
* every call without waiting for the actual eviction would
* create significant lock contention.
*/
if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
return;
case ARC_OVF_SEVERE:
default:
{
arc_evict_waiter_t aw;
list_link_init(&aw.aew_node);
cv_init(&aw.aew_cv, NULL, CV_DEFAULT, NULL);
uint64_t last_count = 0;
mutex_enter(&arc_evict_lock);
if (!list_is_empty(&arc_evict_waiters)) {
arc_evict_waiter_t *last =
list_tail(&arc_evict_waiters);
last_count = last->aew_count;
} else if (!arc_evict_needed) {
arc_evict_needed = B_TRUE;
zthr_wakeup(arc_evict_zthr);
}
/*
* Note, the last waiter's count may be less than
* arc_evict_count if we are low on memory in which
* case arc_evict_state_impl() may have deferred
* wakeups (but still incremented arc_evict_count).
*/
aw.aew_count = MAX(last_count, arc_evict_count) + amount;
list_insert_tail(&arc_evict_waiters, &aw);
arc_set_need_free();
DTRACE_PROBE3(arc__wait__for__eviction,
uint64_t, amount,
uint64_t, arc_evict_count,
uint64_t, aw.aew_count);
/*
* We will be woken up either when arc_evict_count reaches
* aew_count, or when the ARC is no longer overflowing and
* eviction completes.
* In case of "false" wakeup, we will still be on the list.
*/
do {
cv_wait(&aw.aew_cv, &arc_evict_lock);
} while (list_link_active(&aw.aew_node));
mutex_exit(&arc_evict_lock);
cv_destroy(&aw.aew_cv);
}
}
}
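/*
 * Illustrative example of the severe-overflow path above (hypothetical
 * numbers): if arc_evict_count is 100 MiB, the last queued waiter has
 * aew_count = 150 MiB, and we request amount = 8 MiB, our waiter is
 * queued with aew_count = MAX(150M, 100M) + 8M = 158 MiB and sleeps
 * until eviction progress reaches that count (or until the ARC stops
 * overflowing and the waiter list is drained).
 */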
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag,
int alloc_flags)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
if (alloc_flags & ARC_HDR_DO_ADAPT)
arc_adapt(size, state);
/*
* If arc_size is currently overflowing, we must be adding data
* faster than we are evicting. To ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we wait for the eviction thread to
* make some progress. We also wait for there to be sufficient free
* memory in the system, as measured by arc_free_memory().
*
* Specifically, we wait for zfs_arc_eviction_pct percent of the
* requested size to be evicted. This should be more than 100%, to
* ensure that progress is also made towards getting arc_size
* under arc_c. See the comment above zfs_arc_eviction_pct.
*/
arc_wait_for_eviction(size * zfs_arc_eviction_pct / 100,
alloc_flags & ARC_HDR_USE_RESERVE);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
if (!GHOST_STATE(state)) {
(void) zfs_refcount_add_many(&state->arcs_size, size, tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) zfs_refcount_add_many(&state->arcs_esize[type],
size, tag);
}
/*
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
if (aggsum_upper_bound(&arc_sums.arcstat_size) < arc_c &&
hdr->b_l1hdr.b_state == arc_anon &&
(zfs_refcount_count(&arc_anon->arcs_size) +
zfs_refcount_count(&arc_mru->arcs_size) > arc_p))
arc_p = MIN(arc_c, arc_p + size);
}
}
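/*
 * Illustrative example of the wait sizing above (assuming the default
 * zfs_arc_eviction_pct of 200): an 8 MiB allocation while the ARC is
 * severely overflowing waits for 8M * 200 / 100 = 16 MiB of eviction
 * progress, i.e. twice the requested size, so each allocation also
 * helps drive arc_size back down toward arc_c.
 */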
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) zfs_refcount_remove_many(&state->arcs_size, size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
* NOTE: the hash lock must be held by the caller and is not dropped here.
*/
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
clock_t now;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
if (hdr->b_l1hdr.b_state == arc_anon) {
/*
* This buffer is not in the cache, and does not
* appear in our "ghost" list. Add the new buffer
* to the MRU state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr, hash_lock);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
now = ddi_get_lbolt();
/*
* If this buffer is here because of a prefetch, then either:
* - clear the flag if this is a "referencing" read
* (any subsequent access will bump this into the MFU state).
* or
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
/* link protected by hash lock */
ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
} else {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* This buffer has been "accessed" only once so far,
* but it is still in the cache. Move it to the MFU
* state.
*/
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
}
hdr->b_l1hdr.b_mru_hits++;
ARCSTAT_BUMP(arcstat_mru_hits);
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been "accessed" recently, but
* was evicted from the cache. Move it to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
new_state = arc_mru;
if (zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
arc_change_state(new_state, hdr, hash_lock);
hdr->b_l1hdr.b_mru_ghost_hits++;
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is
* still in the cache. Keep it in the MFU state.
*
* NOTE: an add_reference() that occurred when we did
* the arc_read() will have kicked this off the list.
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
hdr->b_l1hdr.b_mfu_hits++;
ARCSTAT_BUMP(arcstat_mfu_hits);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
arc_state_t *new_state = arc_mfu;
/*
* This buffer has been accessed more than once but has
* been evicted from the cache. Move it back to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
new_state = arc_mru;
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(new_state, hdr, hash_lock);
hdr->b_l1hdr.b_mfu_ghost_hits++;
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC.
*/
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
mutex_enter(&buf->b_evict_lock);
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(&buf->b_evict_lock);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
mutex_exit(&buf->b_evict_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
mutex_exit(&buf->b_evict_lock);
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* a generic arc_read_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
if (buf == NULL)
return;
bcopy(buf->b_data, arg, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* a generic arc_read_done_func_t */
/* ARGSUSED */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
arc_buf_t **bufp = arg;
if (buf == NULL) {
ASSERT(zio == NULL || zio->io_error != 0);
*bufp = NULL;
} else {
ASSERT(zio == NULL || zio->io_error == 0);
*bufp = buf;
ASSERT(buf->b_data != NULL);
}
}
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
boolean_t freeable = B_FALSE;
/*
* The hdr was inserted into hash-table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if we were freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
if (!HDR_L2_READING(hdr)) {
hdr->b_complevel = zio->io_prop.zp_complevel;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
if (hash_lock && zio->io_error == 0 &&
hdr->b_l1hdr.b_state == arc_anon) {
/*
* Only call arc_access on anonymous buffers. This is because
* if we've issued an I/O for an evicted buffer, we've already
* called arc_access (to prevent any simultaneous readers from
* getting confused).
*/
arc_access(hdr, hash_lock);
}
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
if (!acb->acb_done || acb->acb_nobuf)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
&acb->acb_zb, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != EACCES);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(zio->io_spa, &acb->acb_zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &acb->acb_zb, zio, 0);
}
}
if (error != 0) {
/*
* Decompression or decryption failed. Set
* io_error so that when we call acb_done
* (below), we will indicate that the read
* failed. Note that in the unusual case
* where one callback is compressed and another
* uncompressed, we will mark all of them
* as failed, even though the uncompressed
* one can't actually fail. In this case,
* the hdr will not be anonymous, because
* if there are multiple callbacks, it's
* because multiple threads found the same
* arc buf in the hash table.
*/
zio->io_error = error;
}
}
/*
* If there are multiple callbacks, we must have the hash lock,
* because the only way for multiple threads to find this hdr is
* in the hash table. This ensures that if there are multiple
* callbacks, the hdr is not anonymous. If it were anonymous,
* we couldn't use arc_buf_destroy() in the error case below.
*/
ASSERT(callback_cnt < 2 || hash_lock != NULL);
hdr->b_l1hdr.b_acb = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (callback_cnt == 0)
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT(zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr, hash_lock);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/*
* Broadcast before we drop the hash_lock to avoid the possibility
* that the hdr (and hence the cv) might be freed before we get to
* the cv_broadcast().
*/
cv_broadcast(&hdr->b_l1hdr.b_cv);
if (hash_lock != NULL) {
mutex_exit(hash_lock);
} else {
/*
* This block was freed while we waited for the read to
* complete. It has been removed from the hash table and
* moved to the anonymous state (so that it won't show up
* in the cache).
*/
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
freeable = zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done != NULL) {
if (zio->io_error != 0 && acb->acb_buf != NULL) {
/*
* If arc_buf_alloc_impl() fails during
* decompression, the buf will still be
* allocated, and needs to be freed here.
*/
arc_buf_destroy(acb->acb_buf,
acb->acb_private);
acb->acb_buf = NULL;
}
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_next;
kmem_free(acb, sizeof (arc_callback_t));
}
if (freeable)
arc_hdr_destroy(hdr);
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t embedded_bp = !!BP_IS_EMBEDDED(bp);
boolean_t no_buf = *arc_flags & ARC_FLAG_NO_BUF;
int rc = 0;
ASSERT(!embedded_bp ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_REDACTED(bp));
/*
* Normally SPL_FSTRANS will already be set since kernel threads which
* expect to call the DMU interfaces will set it when created. System
* calls are similarly handled by setting/clearing the bit in the
* registered callback (module/os/.../zfs/zpl_*).
*
* External consumers such as Lustre which call the exported DMU
* interfaces may not have set SPL_FSTRANS. To avoid a deadlock
* on the hash_lock always set and clear the bit.
*/
fstrans_cookie_t cookie = spl_fstrans_mark();
top:
+ /*
+ * Verify the block pointer contents are reasonable. This should
+ * always be the case since the blkptr is protected by a checksum.
+ * However, if there is damage it's desirable to detect this early
+ * and treat it as a checksum error. This allows an alternate blkptr
+ * to be tried when one is available (e.g. ditto blocks).
+ */
+ if (!zfs_blkptr_verify(spa, bp, zio_flags & ZIO_FLAG_CONFIG_WRITER,
+ BLK_VERIFY_LOG)) {
+ rc = SET_ERROR(ECKSUM);
+ goto out;
+ }
+
if (!embedded_bp) {
/*
* Embedded BP's have no DVA and require no I/O to "read".
* Create an anonymous arc buf to back it.
*/
- if (!zfs_blkptr_verify(spa, bp, zio_flags &
- ZIO_FLAG_CONFIG_WRITER, BLK_VERIFY_LOG)) {
- rc = SET_ERROR(ECKSUM);
- goto out;
- }
-
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
arc_buf_t *buf = NULL;
*arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_cached_only_in_progress);
rc = SET_ERROR(ENOENT);
goto out;
}
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (*arc_flags & ARC_FLAG_WAIT) {
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
if (done) {
arc_callback_t *acb = NULL;
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_nobuf = no_buf;
acb->acb_zb = *zb;
if (pio != NULL)
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
ASSERT3P(acb->acb_done, !=, NULL);
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb = acb;
}
mutex_exit(hash_lock);
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
if (done && !no_buf) {
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
/*
* This is a demand read which does not have to
* wait for i/o because we did a predictive
* prefetch i/o for it, which has completed.
*/
DTRACE_PROBE1(
arc__demand__hit__predictive__prefetch,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(
arcstat_demand_hit_predictive_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
ARCSTAT_BUMP(
arcstat_demand_hit_prescient_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PRESCIENT_PREFETCH);
}
ASSERT(!embedded_bp || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb, private,
encrypted_read, compressed_read, noauth_read,
B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport if needed)
* before leaving the ARC.
*/
rc = SET_ERROR(EIO);
if ((zio_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, zb);
(void) zfs_ereport_post(
FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0);
}
}
if (rc != 0) {
(void) remove_reference(hdr, hash_lock,
private);
arc_buf_destroy_impl(buf);
buf = NULL;
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != EACCES);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
data, metadata, hits);
if (done)
done(NULL, zb, bp, buf, private);
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
int alloc_flags = encrypted_read ? ARC_HDR_ALLOC_RDATA : 0;
if (*arc_flags & ARC_FLAG_CACHED_ONLY) {
rc = SET_ERROR(ENOENT);
if (hash_lock != NULL)
mutex_exit(hash_lock);
goto out;
}
if (hdr == NULL) {
/*
* This block is not in the cache or it has
* embedded data.
*/
arc_buf_hdr_t *exists = NULL;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), 0, type);
if (!embedded_bp) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
alloc_flags |= ARC_HDR_DO_ADAPT;
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(zfs_refcount_count(
&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* This is a delicate dance that we play here.
* This hdr might be in the ghost list so we access
* it to move it out of the ghost list before we
* initiate the read. If it's a prefetch then
* it won't have a callback so we'll remove the
* reference that arc_buf_alloc_impl() created. We
* do this after we've called arc_access() to
* avoid hitting an assert in remove_reference().
*/
arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state);
arc_access(hdr, hash_lock);
}
arc_hdr_alloc_abd(hdr, alloc_flags);
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (*arc_flags & ARC_FLAG_PREFETCH &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_decrement_state(hdr);
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (HDR_HAS_L2HDR(hdr))
l2arc_hdr_arcstats_increment_state(hdr);
}
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_zb = *zb;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
* Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss or a blkptr
* with embedded data. Try again in L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
/*
* Skip ARC stat bump for block pointers with embedded
* data. The data are read from the blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr,
blkptr_t *, bp, uint64_t, lsize,
zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data,
metadata, misses);
zfs_racct_read(size, 1);
}
/* Check if the spa even has l2 configured */
const boolean_t spa_has_l2 = l2arc_ndev != 0 &&
spa->spa_l2cache.sav_count > 0;
if (vd != NULL && spa_has_l2 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
* 5. This isn't prefetch or l2arc_noprefetch is 0.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
!(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
hdr->b_l2hdr.b_hits++;
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
/*
* When Compressed ARC is disabled, but the
* L2ARC block is compressed, arc_hdr_size()
* will have returned LSIZE rather than PSIZE.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr) &&
HDR_GET_PSIZE(hdr) != 0) {
size = HDR_GET_PSIZE(hdr);
}
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
zio_flags | ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
/*
* Only a spa with l2 should contribute to l2
* miss stats. (Including the case of having a
* faulted cache device - that's also a miss.)
*/
if (spa_has_l2) {
/*
* Skip ARC stat bump for block pointers with
* embedded data. The data are read from the
* blkptr itself via
* decode_embedded_bp_compressed().
*/
if (!embedded_bp) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!embedded_bp)
spa_read_history_add(spa, zb, *arc_flags);
spl_fstrans_unmark(cookie);
return (rc);
}
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
zfs_refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
zfs_refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (zfs_refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(zfs_refcount_count(&p->p_refcnt));
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
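/*
 * Editor's note: a minimal usage sketch for the prune callback API above.
 * The names example_prune_cb, example_prune_handle, example_init and
 * example_fini are hypothetical; real consumers (e.g. the ZPL) register
 * their own callbacks at module init and remove them on teardown.
 */
#if 0	/* illustrative sketch only, not compiled */
static void
example_prune_cb(int64_t nr_to_scan, void *priv)
{
	/* Release up to nr_to_scan cached, unreferenced metadata objects. */
	(void) nr_to_scan;
	(void) priv;
}

static arc_prune_t *example_prune_handle;

static void
example_init(void)
{
	/* Registration takes a reference held on arc_prune_list. */
	example_prune_handle = arc_add_prune_callback(example_prune_cb, NULL);
}

static void
example_fini(void)
{
	/* Removal drops the reference and waits for in-flight prune tasks. */
	arc_remove_prune_callback(example_prune_handle);
}
#endif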
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (i.e. prefetch) or has a reference (i.e. a dedup-ed,
* dmu_sync-ed block). If this block is being prefetched, then it
* would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
* until the I/O completes. A block may also have a reference if it is
* part of a dedup-ed, dmu_synced write. The dmu_sync() function would
* have written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
mutex_enter(&buf->b_evict_lock);
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
mutex_exit(&buf->b_evict_lock);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
- ASSERT(HDR_EMPTY(hdr));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
* held; we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_bufcnt > 1) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
(void) remove_reference(hdr, hash_lock, tag);
if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (arc_buf_is_shared(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
VERIFY(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared, so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!ARC_BUF_SHARED(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(&state->arcs_size,
arc_buf_size(buf), buf);
if (zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) zfs_refcount_remove_many(
&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf))
hdr->b_crypt_hdr.b_ebufcnt -= 1;
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
/*
* Allocate a new hdr. The new hdr will contain a b_pabd
* buffer which will be freed in arc_write().
*/
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, hdr->b_complevel, type);
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(nhdr->b_l1hdr.b_bufcnt);
ASSERT0(zfs_refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
nhdr->b_l1hdr.b_bufcnt = 1;
if (ARC_BUF_ENCRYPTED(buf))
nhdr->b_crypt_hdr.b_ebufcnt = 1;
(void) zfs_refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
(void) zfs_refcount_add_many(&arc_anon->arcs_size,
arc_buf_size(buf), buf);
} else {
mutex_exit(&buf->b_evict_lock);
ASSERT(zfs_refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
arc_change_state(arc_anon, hdr, hash_lock);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
int
arc_released(arc_buf_t *buf)
{
int released;
mutex_enter(&buf->b_evict_lock);
released = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
mutex_exit(&buf->b_evict_lock);
return (released);
}
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
int referenced;
mutex_enter(&buf->b_evict_lock);
referenced = (zfs_refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
mutex_exit(&buf->b_evict_lock);
return (referenced);
}
#endif
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!zfs_refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
/*
* If we're reexecuting this zio because the pool suspended, then
* clean up any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr))
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(HDR_PROTECTED(hdr));
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
hdr->b_complevel = zio->io_prop.zp_complevel;
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio, otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether scattered
* ABDs are allowed, and assume that if they are then the user wants
* the ARC to be primarily filled with them regardless of the data being
* written. Therefore, if they're allowed then we allocate one and copy
* the data into it; otherwise, we share the data directly if we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT | ARC_HDR_ALLOC_RDATA |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (!abd_size_alloc_linear(arc_buf_size(buf)) ||
!arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
* user may have disabled compressed ARC, thus we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_ALLOC_RDATA | ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_USE_RESERVE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, ARC_HDR_DO_ADAPT |
ARC_HDR_USE_RESERVE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
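/*
 * Editor's note: a compact summary of the data-placement choice made in
 * arc_write_ready() above (a sketch of the existing branches, not new
 * behavior):
 *
 *   buf encrypted                          -> copy zio->io_abd into b_rabd
 *   scatter ABD would be used, or buf
 *   cannot be shared with the hdr:
 *     bp encrypted                         -> copy zio->io_abd into b_rabd
 *     hdr compressed but buf is not        -> copy zio->io_abd into b_pabd
 *     otherwise                            -> copy buf->b_data into b_pabd
 *   else (linear and shareable)            -> arc_share_buf(), no copy
 */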
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
/*
* The SPA calls this callback for each physical write that happens on behalf
* of a logical write. See the comment in dbuf_write_physdone() for details.
*/
static void
arc_write_physdone(zio_t *zio)
{
arc_write_callback_t *cb = zio->io_private;
if (cb->awcb_physdone != NULL)
cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(zfs_refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists, hash_lock);
arc_hdr_destroy(exists);
mutex_exit(hash_lock);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
}
ASSERT(!zfs_refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
callback->awcb_done(zio, buf, callback->awcb_private);
abd_free(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
arc_write_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_complevel = hdr->b_complevel;
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_physdone = physdone;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
* The hdr's b_pabd is now stale; free it now. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_physdone, arc_write_done, callback,
priority, zio_flags, zb);
return (zio);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)(zfs_refcount_count(&arc_anon->arcs_size) -
arc_loaned_bytes), 0);
/*
* Writes will almost always require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(spa, reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
*
* In the case of one pool being built on another pool, we want
* to make sure we don't end up throttling the lower (backing)
* pool when the upper pool is the majority contributor to dirty
* data. To ensure we make forward progress during throttling, we
* also check the current pool's net dirty data and only throttle
* if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty
* data in the cache.
*
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
uint64_t total_dirty = reserve + arc_tempreserve + anon_size;
uint64_t spa_dirty_anon = spa_dirty_data(spa);
uint64_t rarc_c = arc_warm ? arc_c : arc_c_max;
if (total_dirty > rarc_c * zfs_arc_dirty_limit_percent / 100 &&
anon_size > rarc_c * zfs_arc_anon_limit_percent / 100 &&
spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) {
#ifdef ZFS_DEBUG
uint64_t meta_esize = zfs_refcount_count(
&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
zfs_refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK rarc_c=%lluK\n",
(u_longlong_t)arc_tempreserve >> 10,
(u_longlong_t)meta_esize >> 10,
(u_longlong_t)data_esize >> 10,
(u_longlong_t)reserve >> 10,
(u_longlong_t)rarc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
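/*
 * Editor's note: a worked example of the throttle check above, assuming
 * illustrative tunable values (zfs_arc_dirty_limit_percent = 50,
 * zfs_arc_anon_limit_percent = 25, zfs_arc_pool_dirty_percent = 20) and a
 * warm ARC with arc_c = 8 GiB, so rarc_c = 8 GiB:
 *
 *   total_dirty must exceed    8 GiB * 50 / 100 = 4 GiB
 *   anon_size must exceed      8 GiB * 25 / 100 = 2 GiB
 *   spa_dirty_anon must exceed anon_size * 20 / 100
 *
 * Only when all three conditions hold does the reservation fail with
 * ERESTART; otherwise the reserve is added to arc_tempreserve and later
 * released via arc_tempreserve_clear().
 */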
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
size->value.ui64 = zfs_refcount_count(&state->arcs_size);
evict_data->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
zfs_refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE)
return (SET_ERROR(EACCES));
as->arcstat_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_hits);
as->arcstat_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_misses);
as->arcstat_demand_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_hits);
as->arcstat_demand_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_data_misses);
as->arcstat_demand_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_hits);
as->arcstat_demand_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_metadata_misses);
as->arcstat_prefetch_data_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_hits);
as->arcstat_prefetch_data_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_data_misses);
as->arcstat_prefetch_metadata_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_hits);
as->arcstat_prefetch_metadata_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_prefetch_metadata_misses);
as->arcstat_mru_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_hits);
as->arcstat_mru_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mru_ghost_hits);
as->arcstat_mfu_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_hits);
as->arcstat_mfu_ghost_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_mfu_ghost_hits);
as->arcstat_deleted.value.ui64 =
wmsum_value(&arc_sums.arcstat_deleted);
as->arcstat_mutex_miss.value.ui64 =
wmsum_value(&arc_sums.arcstat_mutex_miss);
as->arcstat_access_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_access_skip);
as->arcstat_evict_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_skip);
as->arcstat_evict_not_enough.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_not_enough);
as->arcstat_evict_l2_cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_cached);
as->arcstat_evict_l2_eligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible);
as->arcstat_evict_l2_eligible_mfu.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mfu);
as->arcstat_evict_l2_eligible_mru.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_eligible_mru);
as->arcstat_evict_l2_ineligible.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_ineligible);
as->arcstat_evict_l2_skip.value.ui64 =
wmsum_value(&arc_sums.arcstat_evict_l2_skip);
as->arcstat_hash_collisions.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_collisions);
as->arcstat_hash_chains.value.ui64 =
wmsum_value(&arc_sums.arcstat_hash_chains);
as->arcstat_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_size);
as->arcstat_compressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_compressed_size);
as->arcstat_uncompressed_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_uncompressed_size);
as->arcstat_overhead_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_overhead_size);
as->arcstat_hdr_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_hdr_size);
as->arcstat_data_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_data_size);
as->arcstat_metadata_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_metadata_size);
as->arcstat_dbuf_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_dbuf_size);
#if defined(COMPAT_FREEBSD11)
as->arcstat_other_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size) +
aggsum_value(&arc_sums.arcstat_dnode_size) +
wmsum_value(&arc_sums.arcstat_dbuf_size);
#endif
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
as->arcstat_dnode_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_dnode_size);
as->arcstat_bonus_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_bonus_size);
as->arcstat_l2_hits.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_hits);
as->arcstat_l2_misses.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_misses);
as->arcstat_l2_prefetch_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_prefetch_asize);
as->arcstat_l2_mru_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mru_asize);
as->arcstat_l2_mfu_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_mfu_asize);
as->arcstat_l2_bufc_data_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_data_asize);
as->arcstat_l2_bufc_metadata_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_bufc_metadata_asize);
as->arcstat_l2_feeds.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_feeds);
as->arcstat_l2_rw_clash.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rw_clash);
as->arcstat_l2_read_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_read_bytes);
as->arcstat_l2_write_bytes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_write_bytes);
as->arcstat_l2_writes_sent.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_sent);
as->arcstat_l2_writes_done.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_done);
as->arcstat_l2_writes_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_error);
as->arcstat_l2_writes_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_writes_lock_retry);
as->arcstat_l2_evict_lock_retry.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_lock_retry);
as->arcstat_l2_evict_reading.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_reading);
as->arcstat_l2_evict_l1cached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_evict_l1cached);
as->arcstat_l2_free_on_write.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_free_on_write);
as->arcstat_l2_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_abort_lowmem);
as->arcstat_l2_cksum_bad.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_cksum_bad);
as->arcstat_l2_io_error.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_io_error);
as->arcstat_l2_lsize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_lsize);
as->arcstat_l2_psize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_psize);
as->arcstat_l2_hdr_size.value.ui64 =
aggsum_value(&arc_sums.arcstat_l2_hdr_size);
as->arcstat_l2_log_blk_writes.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_writes);
as->arcstat_l2_log_blk_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_asize);
as->arcstat_l2_log_blk_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_log_blk_count);
as->arcstat_l2_rebuild_success.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_success);
as->arcstat_l2_rebuild_abort_unsupported.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
as->arcstat_l2_rebuild_abort_io_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
as->arcstat_l2_rebuild_abort_dh_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
as->arcstat_l2_rebuild_abort_cksum_lb_errors.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
as->arcstat_l2_rebuild_abort_lowmem.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
as->arcstat_l2_rebuild_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_size);
as->arcstat_l2_rebuild_asize.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_asize);
as->arcstat_l2_rebuild_bufs.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs);
as->arcstat_l2_rebuild_bufs_precached.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_bufs_precached);
as->arcstat_l2_rebuild_log_blks.value.ui64 =
wmsum_value(&arc_sums.arcstat_l2_rebuild_log_blks);
as->arcstat_memory_throttle_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_throttle_count);
as->arcstat_memory_direct_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_direct_count);
as->arcstat_memory_indirect_count.value.ui64 =
wmsum_value(&arc_sums.arcstat_memory_indirect_count);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
as->arcstat_prune.value.ui64 =
wmsum_value(&arc_sums.arcstat_prune);
as->arcstat_meta_used.value.ui64 =
aggsum_value(&arc_sums.arcstat_meta_used);
as->arcstat_async_upgrade_sync.value.ui64 =
wmsum_value(&arc_sums.arcstat_async_upgrade_sync);
as->arcstat_demand_hit_predictive_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_predictive_prefetch);
as->arcstat_demand_hit_prescient_prefetch.value.ui64 =
wmsum_value(&arc_sums.arcstat_demand_hit_prescient_prefetch);
as->arcstat_raw_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_raw_size);
as->arcstat_cached_only_in_progress.value.ui64 =
wmsum_value(&arc_sums.arcstat_cached_only_in_progress);
as->arcstat_abd_chunk_waste_size.value.ui64 =
wmsum_value(&arc_sums.arcstat_abd_chunk_waste_size);
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
static unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
* The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power-of-two number of sublists, each sublist's usage
* would not be evenly distributed. In this context a full 64-bit
* division would be a waste of time, so limit it to 32 bits.
*/
return ((unsigned int)buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
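/*
 * Editor's note: a small worked example of the index calculation above,
 * assuming a multilist with 8 sublists. If buf_hash() returns 0x9e3779b1
 * for a header's (b_spa, b_dva, b_birth), the header maps to sublist
 * 0x9e3779b1 % 8 = 1, and the same index is recomputed on removal, so no
 * per-header sublist index needs to be stored.
 */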
static unsigned int
arc_state_l2c_multilist_index_func(multilist_t *ml, void *obj)
{
panic("Header %p insert into arc_l2c_only %p", obj, ml);
}
#define WARN_IF_TUNING_IGNORED(tuning, value, do_warn) do { \
if ((do_warn) && (tuning) && ((tuning) != (value))) { \
cmn_err(CE_WARN, \
"ignoring tunable %s (using %llu instead)", \
(#tuning), (u_longlong_t)(value)); \
} \
} while (0)
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Can also be
* called explicitly by param_set_arc_*() functions when ARC tunables are
* updated manually. Non-zero zfs_* values which differ from the currently set
* values will be applied.
*/
void
arc_tuning_update(boolean_t verbose)
{
uint64_t allmem = arc_all_memory();
unsigned long limit;
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
WARN_IF_TUNING_IGNORED(zfs_arc_min, arc_c_min, verbose);
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= MIN_ARC_MAX) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = MIN(arc_c, arc_c_max);
arc_p = (arc_c >> 1);
if (arc_meta_limit > arc_c_max)
arc_meta_limit = arc_c_max;
if (arc_dnode_size_limit > arc_meta_limit)
arc_dnode_size_limit = arc_meta_limit;
}
WARN_IF_TUNING_IGNORED(zfs_arc_max, arc_c_max, verbose);
/* Valid range: 16M - <arc_c_max> */
if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
(zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_meta_min <= arc_c_max)) {
arc_meta_min = zfs_arc_meta_min;
if (arc_meta_limit < arc_meta_min)
arc_meta_limit = arc_meta_min;
if (arc_dnode_size_limit < arc_meta_min)
arc_dnode_size_limit = arc_meta_min;
}
WARN_IF_TUNING_IGNORED(zfs_arc_meta_min, arc_meta_min, verbose);
/* Valid range: <arc_meta_min> - <arc_c_max> */
limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100;
if ((limit != arc_meta_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_c_max))
arc_meta_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_meta_limit, arc_meta_limit, verbose);
/* Valid range: <arc_meta_min> - <arc_meta_limit> */
limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100;
if ((limit != arc_dnode_size_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_meta_limit))
arc_dnode_size_limit = limit;
WARN_IF_TUNING_IGNORED(zfs_arc_dnode_limit, arc_dnode_size_limit,
verbose);
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
}
/* Valid range: 1 - N */
if (zfs_arc_p_min_shift)
arc_p_min_shift = zfs_arc_p_min_shift;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if ((zfs_arc_lotsfree_percent >= 0) &&
(zfs_arc_lotsfree_percent <= 100))
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
WARN_IF_TUNING_IGNORED(zfs_arc_lotsfree_percent, arc_lotsfree_percent,
verbose);
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
WARN_IF_TUNING_IGNORED(zfs_arc_sys_free, arc_sys_free, verbose);
}
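/*
 * Editor's note: a worked example of the derived meta limit above, assuming
 * arc_c_max = 8 GiB, no explicit zfs_arc_meta_limit, and
 * zfs_arc_meta_limit_percent = 75 (its usual default):
 *
 *   limit = MIN(75, 100) * 8 GiB / 100 = 6 GiB
 *
 * The value is applied only if it lies within [arc_meta_min, arc_c_max];
 * otherwise the existing arc_meta_limit is kept (and an explicitly set but
 * ignored zfs_arc_meta_limit is reported by WARN_IF_TUNING_IGNORED()).
 */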
static void
arc_state_init(void)
{
multilist_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
multilist_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated. Special index function asserts that.
*/
multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_l2c_multilist_index_func);
multilist_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_l2c_multilist_index_func);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_create(&arc_anon->arcs_size);
zfs_refcount_create(&arc_mru->arcs_size);
zfs_refcount_create(&arc_mru_ghost->arcs_size);
zfs_refcount_create(&arc_mfu->arcs_size);
zfs_refcount_create(&arc_mfu_ghost->arcs_size);
zfs_refcount_create(&arc_l2c_only->arcs_size);
wmsum_init(&arc_sums.arcstat_hits, 0);
wmsum_init(&arc_sums.arcstat_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_data_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_data_misses, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_demand_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_data_misses, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_hits, 0);
wmsum_init(&arc_sums.arcstat_prefetch_metadata_misses, 0);
wmsum_init(&arc_sums.arcstat_mru_hits, 0);
wmsum_init(&arc_sums.arcstat_mru_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_hits, 0);
wmsum_init(&arc_sums.arcstat_mfu_ghost_hits, 0);
wmsum_init(&arc_sums.arcstat_deleted, 0);
wmsum_init(&arc_sums.arcstat_mutex_miss, 0);
wmsum_init(&arc_sums.arcstat_access_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_skip, 0);
wmsum_init(&arc_sums.arcstat_evict_not_enough, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_cached, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mfu, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_eligible_mru, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_ineligible, 0);
wmsum_init(&arc_sums.arcstat_evict_l2_skip, 0);
wmsum_init(&arc_sums.arcstat_hash_collisions, 0);
wmsum_init(&arc_sums.arcstat_hash_chains, 0);
aggsum_init(&arc_sums.arcstat_size, 0);
wmsum_init(&arc_sums.arcstat_compressed_size, 0);
wmsum_init(&arc_sums.arcstat_uncompressed_size, 0);
wmsum_init(&arc_sums.arcstat_overhead_size, 0);
wmsum_init(&arc_sums.arcstat_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_data_size, 0);
wmsum_init(&arc_sums.arcstat_metadata_size, 0);
wmsum_init(&arc_sums.arcstat_dbuf_size, 0);
aggsum_init(&arc_sums.arcstat_dnode_size, 0);
wmsum_init(&arc_sums.arcstat_bonus_size, 0);
wmsum_init(&arc_sums.arcstat_l2_hits, 0);
wmsum_init(&arc_sums.arcstat_l2_misses, 0);
wmsum_init(&arc_sums.arcstat_l2_prefetch_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mru_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_mfu_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_data_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_bufc_metadata_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_feeds, 0);
wmsum_init(&arc_sums.arcstat_l2_rw_clash, 0);
wmsum_init(&arc_sums.arcstat_l2_read_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_write_bytes, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_sent, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_done, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_error, 0);
wmsum_init(&arc_sums.arcstat_l2_writes_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_lock_retry, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_reading, 0);
wmsum_init(&arc_sums.arcstat_l2_evict_l1cached, 0);
wmsum_init(&arc_sums.arcstat_l2_free_on_write, 0);
wmsum_init(&arc_sums.arcstat_l2_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_cksum_bad, 0);
wmsum_init(&arc_sums.arcstat_l2_io_error, 0);
wmsum_init(&arc_sums.arcstat_l2_lsize, 0);
wmsum_init(&arc_sums.arcstat_l2_psize, 0);
aggsum_init(&arc_sums.arcstat_l2_hdr_size, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_writes, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_log_blk_count, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_success, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_unsupported, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_io_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_dh_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_abort_lowmem, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_size, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_asize, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_bufs_precached, 0);
wmsum_init(&arc_sums.arcstat_l2_rebuild_log_blks, 0);
wmsum_init(&arc_sums.arcstat_memory_throttle_count, 0);
wmsum_init(&arc_sums.arcstat_memory_direct_count, 0);
wmsum_init(&arc_sums.arcstat_memory_indirect_count, 0);
wmsum_init(&arc_sums.arcstat_prune, 0);
aggsum_init(&arc_sums.arcstat_meta_used, 0);
wmsum_init(&arc_sums.arcstat_async_upgrade_sync, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_predictive_prefetch, 0);
wmsum_init(&arc_sums.arcstat_demand_hit_prescient_prefetch, 0);
wmsum_init(&arc_sums.arcstat_raw_size, 0);
wmsum_init(&arc_sums.arcstat_cached_only_in_progress, 0);
wmsum_init(&arc_sums.arcstat_abd_chunk_waste_size, 0);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
}
static void
arc_state_fini(void)
{
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
zfs_refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
zfs_refcount_destroy(&arc_anon->arcs_size);
zfs_refcount_destroy(&arc_mru->arcs_size);
zfs_refcount_destroy(&arc_mru_ghost->arcs_size);
zfs_refcount_destroy(&arc_mfu->arcs_size);
zfs_refcount_destroy(&arc_mfu_ghost->arcs_size);
zfs_refcount_destroy(&arc_l2c_only->arcs_size);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
wmsum_fini(&arc_sums.arcstat_hits);
wmsum_fini(&arc_sums.arcstat_misses);
wmsum_fini(&arc_sums.arcstat_demand_data_hits);
wmsum_fini(&arc_sums.arcstat_demand_data_misses);
wmsum_fini(&arc_sums.arcstat_demand_metadata_hits);
wmsum_fini(&arc_sums.arcstat_demand_metadata_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_data_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_data_misses);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_hits);
wmsum_fini(&arc_sums.arcstat_prefetch_metadata_misses);
wmsum_fini(&arc_sums.arcstat_mru_hits);
wmsum_fini(&arc_sums.arcstat_mru_ghost_hits);
wmsum_fini(&arc_sums.arcstat_mfu_hits);
wmsum_fini(&arc_sums.arcstat_mfu_ghost_hits);
wmsum_fini(&arc_sums.arcstat_deleted);
wmsum_fini(&arc_sums.arcstat_mutex_miss);
wmsum_fini(&arc_sums.arcstat_access_skip);
wmsum_fini(&arc_sums.arcstat_evict_skip);
wmsum_fini(&arc_sums.arcstat_evict_not_enough);
wmsum_fini(&arc_sums.arcstat_evict_l2_cached);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mfu);
wmsum_fini(&arc_sums.arcstat_evict_l2_eligible_mru);
wmsum_fini(&arc_sums.arcstat_evict_l2_ineligible);
wmsum_fini(&arc_sums.arcstat_evict_l2_skip);
wmsum_fini(&arc_sums.arcstat_hash_collisions);
wmsum_fini(&arc_sums.arcstat_hash_chains);
aggsum_fini(&arc_sums.arcstat_size);
wmsum_fini(&arc_sums.arcstat_compressed_size);
wmsum_fini(&arc_sums.arcstat_uncompressed_size);
wmsum_fini(&arc_sums.arcstat_overhead_size);
wmsum_fini(&arc_sums.arcstat_hdr_size);
wmsum_fini(&arc_sums.arcstat_data_size);
wmsum_fini(&arc_sums.arcstat_metadata_size);
wmsum_fini(&arc_sums.arcstat_dbuf_size);
aggsum_fini(&arc_sums.arcstat_dnode_size);
wmsum_fini(&arc_sums.arcstat_bonus_size);
wmsum_fini(&arc_sums.arcstat_l2_hits);
wmsum_fini(&arc_sums.arcstat_l2_misses);
wmsum_fini(&arc_sums.arcstat_l2_prefetch_asize);
wmsum_fini(&arc_sums.arcstat_l2_mru_asize);
wmsum_fini(&arc_sums.arcstat_l2_mfu_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_data_asize);
wmsum_fini(&arc_sums.arcstat_l2_bufc_metadata_asize);
wmsum_fini(&arc_sums.arcstat_l2_feeds);
wmsum_fini(&arc_sums.arcstat_l2_rw_clash);
wmsum_fini(&arc_sums.arcstat_l2_read_bytes);
wmsum_fini(&arc_sums.arcstat_l2_write_bytes);
wmsum_fini(&arc_sums.arcstat_l2_writes_sent);
wmsum_fini(&arc_sums.arcstat_l2_writes_done);
wmsum_fini(&arc_sums.arcstat_l2_writes_error);
wmsum_fini(&arc_sums.arcstat_l2_writes_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_lock_retry);
wmsum_fini(&arc_sums.arcstat_l2_evict_reading);
wmsum_fini(&arc_sums.arcstat_l2_evict_l1cached);
wmsum_fini(&arc_sums.arcstat_l2_free_on_write);
wmsum_fini(&arc_sums.arcstat_l2_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_cksum_bad);
wmsum_fini(&arc_sums.arcstat_l2_io_error);
wmsum_fini(&arc_sums.arcstat_l2_lsize);
wmsum_fini(&arc_sums.arcstat_l2_psize);
aggsum_fini(&arc_sums.arcstat_l2_hdr_size);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_writes);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_asize);
wmsum_fini(&arc_sums.arcstat_l2_log_blk_count);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_success);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_unsupported);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_io_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_dh_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_cksum_lb_errors);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_abort_lowmem);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_size);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_asize);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_bufs_precached);
wmsum_fini(&arc_sums.arcstat_l2_rebuild_log_blks);
wmsum_fini(&arc_sums.arcstat_memory_throttle_count);
wmsum_fini(&arc_sums.arcstat_memory_direct_count);
wmsum_fini(&arc_sums.arcstat_memory_indirect_count);
wmsum_fini(&arc_sums.arcstat_prune);
aggsum_fini(&arc_sums.arcstat_meta_used);
wmsum_fini(&arc_sums.arcstat_async_upgrade_sync);
wmsum_fini(&arc_sums.arcstat_demand_hit_predictive_prefetch);
wmsum_fini(&arc_sums.arcstat_demand_hit_prescient_prefetch);
wmsum_fini(&arc_sums.arcstat_raw_size);
wmsum_fini(&arc_sums.arcstat_cached_only_in_progress);
wmsum_fini(&arc_sums.arcstat_abd_chunk_waste_size);
}
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_set_limits(uint64_t allmem)
{
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more. */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
/* How to set default max varies by platform. */
arc_c_max = arc_default_max(arc_c_min, allmem);
}
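/*
 * Editor's note: a worked example of the minimum above, assuming
 * SPA_MAXBLOCKSHIFT == 24 and 8 GiB of physical memory:
 * arc_c_min = MAX(8 GiB / 32, 2 << 24) = MAX(256 MiB, 32 MiB) = 256 MiB.
 * arc_default_max() then picks a platform-specific fraction of allmem for
 * arc_c_max, which is why no single default is documented here.
 */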
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_evict_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&arc_evict_waiters, sizeof (arc_evict_waiter_t),
offsetof(arc_evict_waiter_t, aew_node));
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#if defined(_KERNEL)
arc_lowmem_init();
#endif
arc_set_limits(allmem);
#ifdef _KERNEL
/*
* If zfs_arc_max is non-zero at init, meaning it was set in the kernel
* environment before the module was loaded, don't block setting the
* maximum because it is less than arc_c_min; instead, reset arc_c_min
* to a lower value.
* zfs_arc_min will be handled by arc_tuning_update().
*/
if (zfs_arc_max != 0 && zfs_arc_max >= MIN_ARC_MAX &&
zfs_arc_max < allmem) {
arc_c_max = zfs_arc_max;
if (arc_c_min >= arc_c_max) {
arc_c_min = MAX(zfs_arc_max / 2,
2ULL << SPA_MAXBLOCKSHIFT);
}
}
#else
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_min;
arc_p = (arc_c >> 1);
/* Set arc_meta_min to 16M, half of the 32M arc_c_min floor */
arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
/*
* Set arc_meta_limit to a percent of arc_c_max with a floor of
* arc_meta_min, and a ceiling of arc_c_max.
*/
percent = MIN(zfs_arc_meta_limit_percent, 100);
arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_size_limit = (percent * arc_meta_limit) / 100;
/* Apply user specified tunings */
arc_tuning_update(B_TRUE);
/* if kmem_flags are set, lets try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_register_hotplug();
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", 100, defclsyspri,
boot_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC |
TASKQ_THREADS_CPU_PCT);
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
arc_evict_zthr = zthr_create("arc_evict",
arc_evict_cb_check, arc_evict_cb, NULL, defclsyspri);
arc_reap_zthr = zthr_create_timer("arc_reap",
arc_reap_cb_check, arc_reap_cb, NULL, SEC2NSEC(1), minclsyspri);
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
#ifdef __LP64__
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#else
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(1ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
#endif
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
if (zfs_wrlog_data_max == 0) {
/*
* dp_wrlog_total is reduced for each txg at the end of
* spa_sync(). However, dp_dirty_total is reduced every time
* a block is written out. Thus under normal operation,
* dp_wrlog_total could grow 2 times as big as
* zfs_dirty_data_max.
*/
zfs_wrlog_data_max = zfs_dirty_data_max * 2;
}
}
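/*
 * Editor's note: a worked example of the dirty-data sizing in arc_init()
 * above, assuming a 64-bit kernel with 16 GiB of memory and the defaults
 * described in the comment (zfs_dirty_data_max_percent = 10,
 * zfs_dirty_data_max_max capped at 4 GiB or 25% of memory):
 *
 *   zfs_dirty_data_max_max = MIN(4 GiB, 16 GiB * 25 / 100) = 4 GiB
 *   zfs_dirty_data_max     = MIN(16 GiB * 10 / 100, 4 GiB) = 1.6 GiB
 *   zfs_wrlog_data_max     = 2 * 1.6 GiB                   = 3.2 GiB
 */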
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
arc_lowmem_fini();
#endif /* _KERNEL */
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
mutex_enter(&arc_prune_mtx);
while ((p = list_head(&arc_prune_list)) != NULL) {
list_remove(&arc_prune_list, p);
zfs_refcount_remove(&p->p_refcnt, &arc_prune_list);
zfs_refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
(void) zthr_cancel(arc_evict_zthr);
(void) zthr_cancel(arc_reap_zthr);
mutex_destroy(&arc_evict_lock);
list_destroy(&arc_evict_waiters);
/*
* Free any buffers that were tagged for destruction. This needs
* to occur before arc_state_fini() runs and destroys the aggsum
* values which are updated when freeing scatter ABDs.
*/
l2arc_do_free_on_write();
/*
* buf_fini() must precede arc_state_fini() because buf_fini() may
* trigger the release of kmem magazines, which can call back to
* arc_space_return(), which accesses aggsums freed in arc_state_fini().
*/
buf_fini();
arc_state_fini();
arc_unregister_hotplug();
/*
* We destroy the zthrs after all the ARC state has been
* torn down to avoid the case of them receiving any
* wakeup() signals after they are destroyed.
*/
zthr_destroy(arc_evict_zthr);
zthr_destroy(arc_reap_zthr);
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially lower read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
* To accommodate this, there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
* lists can remain mostly static. Instead of searching from the tail of these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
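*
* As an illustration only (paths and defaults differ by platform), on Linux
* builds these tunables are exposed as module parameters, e.g.:
*
*	echo 67108864 > /sys/module/zfs/parameters/l2arc_write_max
*
* would raise the per-interval write limit to 64 MiB.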
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*
* L2ARC persistence:
*
* When writing buffers to L2ARC, we periodically add some metadata to
* make sure we can pick them up after reboot, thus dramatically reducing
* the impact that any downtime has on the performance of storage systems
* with large caches.
*
* The implementation works fairly simply by integrating the following two
* modifications:
*
* *) When writing to the L2ARC, we occasionally write an "l2arc log block",
* which is an additional piece of metadata describing what has been
* written. This allows us to rebuild the arc_buf_hdr_t structures of the
* main ARC buffers. There are 2 linked-lists of log blocks headed by
* dh_start_lbps[2]. We alternate which chain we append to, so they are
* time-wise and offset-wise interleaved; this is an optimization rather
* than a correctness requirement. The log block also includes a pointer
* to the previous block in its chain.
*
* *) We reserve SPA_MINBLOCKSIZE of space at the start of each L2ARC device
* for our header bookkeeping purposes. This contains a device header,
* which contains our top-level reference structures. We update it each
* time we write a new log block, so that we're able to locate it in the
* L2ARC device. If this write results in an inconsistent device header
* (e.g. due to power failure), we detect this by verifying the header's
* checksum and simply fail to reconstruct the L2ARC after reboot.
*
* Implementation diagram:
*
* +=== L2ARC device (not to scale) ======================================+
* | ___two newest log block pointers__.__________ |
* | / \dh_start_lbps[1] |
* | / \ \dh_start_lbps[0]|
* |.___/__. V V |
* ||L2 dev|....|lb |bufs |lb |bufs |lb |bufs |lb |bufs |lb |---(empty)---|
* || hdr| ^ /^ /^ / / |
* |+------+ ...--\-------/ \-----/--\------/ / |
* | \--------------/ \--------------/ |
* +======================================================================+
*
* As can be seen in the diagram, rather than using a simple linked list,
* we use a pair of linked lists with alternating elements. This is a
* performance enhancement: the address of the next log block is only
* discovered once the current block has been completely read in, so a
* single chain would keep the device's I/O queue only one operation deep
* and incur a large amount of I/O round-trip latency. Having two lists
* allows us to fetch two log blocks ahead of where we are currently
* rebuilding L2ARC buffers.
*
* On-device data structures:
*
* L2ARC device header: l2arc_dev_hdr_phys_t
* L2ARC log block: l2arc_log_blk_phys_t
*
* L2ARC reconstruction:
*
* When writing data, we simply write in the standard rotary fashion,
* evicting buffers as we go and writing new data over them (writing
* a new log block every now and then). This obviously means that once we
* loop around the end of the device, we will start cutting into an already
* committed log block (and its referenced data buffers), like so:
*
* current write head__ __old tail
* \ /
* V V
* <--|bufs |lb |bufs |lb | |bufs |lb |bufs |lb |-->
* ^ ^^^^^^^^^___________________________________
* | \
* <<nextwrite>> may overwrite this blk and/or its bufs --'
*
* When importing the pool, we detect this situation and use it to stop
* our scanning process (see l2arc_rebuild).
*
* There is one significant caveat to consider when rebuilding ARC contents
* from an L2ARC device: what about invalidated buffers? Given the above
* construction, we cannot update blocks which we've already written to amend
* them to remove buffers which were invalidated. Thus, during reconstruction,
* we might be populating the cache with buffers for data that's not on the
* main pool anymore, or may have been overwritten!
*
* As it turns out, this isn't a problem. Every arc_read request includes
* both the DVA and, crucially, the birth TXG of the BP the caller is
* looking for. So even if the cache were populated by completely rotten
* blocks for data that had been long deleted and/or overwritten, we'll
* never actually return bad data from the cache, since the DVA with the
* birth TXG uniquely identify a block in space and time - once created,
* a block is immutable on disk. The worst we will have done is waste
* some time and memory at l2arc rebuild to reconstruct outdated ARC
* entries that will get dropped from the l2arc as it is being updated
* with new blocks.
*
* L2ARC buffers that have been evicted by l2arc_evict() ahead of the write
* hand are not restored. This is done by saving the offset (in bytes)
* l2arc_evict() has evicted to in the L2ARC device header and taking it
* into account when restoring buffers.
*/
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
uint64_t size, dev_size, tsize;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
"be greater than zero, resetting it to the default (%d)",
L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
/*
* Make sure the write size does not exceed the size of the cache
* device. This is important in l2arc_evict(), otherwise infinite
* iteration can occur.
*/
dev_size = dev->l2ad_end - dev->l2ad_start;
tsize = size + l2arc_log_blk_overhead(size, dev);
if (dev->l2ad_vdev->vdev_has_trim && l2arc_trim_ahead > 0)
tsize += MAX(64 * 1024 * 1024,
(tsize * l2arc_trim_ahead) / 100);
if (tsize >= dev_size) {
cmn_err(CE_NOTE, "l2arc_write_max or l2arc_write_boost "
"plus the overhead of log blocks (persistent L2ARC, "
"%llu bytes) exceeds the size of the cache device "
"(guid %llu), resetting them to the default (%d)",
(u_longlong_t)l2arc_log_blk_overhead(size, dev),
(u_longlong_t)dev->l2ad_vdev->vdev_guid, L2ARC_WRITE_SIZE);
size = l2arc_write_max = l2arc_write_boost = L2ARC_WRITE_SIZE;
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
}
return (size);
}
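/*
* Illustration (hypothetical values): with l2arc_write_max = 8 MiB and
* l2arc_write_boost = 8 MiB, a cold cache (arc_warm == B_FALSE) yields
* size = 16 MiB; tsize additionally accounts for log block overhead and,
* when TRIM-ahead is enabled, at least another 64 MiB.
*/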
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
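/*
* Worked example (illustrative values): with hz = 1000, l2arc_feed_secs = 1
* and l2arc_feed_min_ms = 200, a feed that wrote more than half of what it
* wanted schedules the next write 200 ticks out, otherwise 1000 ticks out,
* measured from when the previous write began but never in the past.
*/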
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
} while (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all);
/* if we were unable to find any usable vdevs, return NULL */
if (vdev_is_dead(next->l2ad_vdev) || next->l2ad_rebuild ||
next->l2ad_trim_all)
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
list_t *buflist;
l2arc_data_free_t *df, *df_prev;
mutex_enter(&l2arc_free_on_write_mtx);
buflist = l2arc_free_on_write;
for (df = list_tail(buflist); df; df = df_prev) {
df_prev = list_prev(buflist, df);
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
list_remove(buflist, df);
kmem_free(df, sizeof (l2arc_data_free_t));
}
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_lb_abd_buf_t *abd_buf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
l2arc_dev_t *dev;
l2arc_dev_hdr_phys_t *l2dhdr;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
* longer L2 eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
uint64_t psize = HDR_GET_PSIZE(hdr);
l2arc_hdr_arcstats_decrement(hdr);
bytes_dropped +=
vdev_psize_to_asize(dev->l2ad_vdev, psize);
(void) zfs_refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
/*
* Free the allocated abd buffers for writing the log blocks.
* If the zio failed reclaim the allocated space and remove the
* pointers to these log blocks from the log block pointer list
* of the L2ARC device.
*/
while ((abd_buf = list_remove_tail(&cb->l2wcb_abd_list)) != NULL) {
abd_free(abd_buf->abd);
zio_buf_free(abd_buf, sizeof (*abd_buf));
if (zio->io_error != 0) {
lb_ptr_buf = list_remove_head(&dev->l2ad_lbptr_list);
/*
* L2BLK_GET_PSIZE returns aligned size for log
* blocks.
*/
uint64_t asize =
L2BLK_GET_PSIZE((lb_ptr_buf->lb_ptr)->lbp_prop);
bytes_dropped += asize;
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
list_destroy(&cb->l2wcb_abd_list);
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* Restore the lbps array in the header to its previous state.
* If the list of log block pointers is empty, zero out the
* log block pointers in the device header.
*/
lb_ptr_buf = list_head(&dev->l2ad_lbptr_list);
for (int i = 0; i < 2; i++) {
if (lb_ptr_buf == NULL) {
/*
* If the list is empty zero out the device
* header. Otherwise zero out the second log
* block pointer in the header.
*/
if (i == 0) {
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
} else {
bzero(&l2dhdr->dh_start_lbps[i],
sizeof (l2arc_log_blkptr_t));
}
break;
}
bcopy(lb_ptr_buf->lb_ptr, &l2dhdr->dh_start_lbps[i],
sizeof (l2arc_log_blkptr_t));
lb_ptr_buf = list_next(&dev->l2ad_lbptr_list,
lb_ptr_buf);
}
}
ARCSTAT_BUMP(arcstat_l2_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
ASSERT(dev->l2ad_vdev != NULL);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_abd(B_FALSE, spa, &cb->l2rcb_zb,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, HDR_GET_PSIZE(hdr), eabd,
hdr->b_l1hdr.b_pabd, &no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed, but ARC compression
* is disabled we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr,
ARC_HDR_DO_ADAPT | ARC_HDR_USE_RESERVE);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr), &hdr->b_complevel);
if (ret != 0) {
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb = zio->io_private;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum;
boolean_t using_rdata = (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT));
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
if (using_rdata) {
abd_copy(hdr->b_crypt_hdr.b_rabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
} else {
abd_copy(hdr->b_l1hdr.b_pabd,
cb->l2rcb_abd, arc_hdr_size(hdr));
}
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because zio is either re-used for
* an I/O of the block in the case of the error
* or the zio is passed to arc_read_done() and it
* needs real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (using_rdata) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
zio->io_prop.zp_complevel = hdr->b_complevel;
valid_cksum = arc_cksum_is_equal(hdr, zio);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio = zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb);
/*
* Original ZIO will be freed, so we need to update
* ARC header with the new ZIO pointer to be used
* by zio_change_priority() in arc_read().
*/
for (struct arc_callback *acb = hdr->b_l1hdr.b_acb;
acb != NULL; acb = acb->acb_next)
acb->acb_zio_head = zio;
mutex_exit(hash_lock);
zio_nowait(zio);
} else {
mutex_exit(hash_lock);
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked list, and also returns
* the lock pointer.
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = &arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = &arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = &arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock(ml, idx));
}
/*
* Calculates the maximum overhead of L2ARC metadata log blocks for a given
* L2ARC write size. l2arc_evict and l2arc_write_size need to include this
* overhead in processing to make sure there is enough headroom available
* when writing buffers.
*/
static inline uint64_t
l2arc_log_blk_overhead(uint64_t write_sz, l2arc_dev_t *dev)
{
if (dev->l2ad_log_entries == 0) {
return (0);
} else {
uint64_t log_entries = write_sz >> SPA_MINBLOCKSHIFT;
uint64_t log_blocks = (log_entries +
dev->l2ad_log_entries - 1) /
dev->l2ad_log_entries;
return (vdev_psize_to_asize(dev->l2ad_vdev,
sizeof (l2arc_log_blk_phys_t)) * log_blocks);
}
}
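/*
* Worked example (illustrative values): for an 8 MiB write on a device with
* l2ad_log_entries = 1022, log_entries = 8 MiB >> SPA_MINBLOCKSHIFT (9)
* = 16384 and log_blocks = ceil(16384 / 1022) = 17, so the overhead is
* 17 aligned copies of l2arc_log_blk_phys_t.
*/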
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers, or it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
l2arc_lb_ptr_buf_t *lb_ptr_buf, *lb_ptr_buf_prev;
vdev_t *vd = dev->l2ad_vdev;
boolean_t rerun;
buflist = &dev->l2ad_buflist;
/*
* We need to add in the worst case scenario of log block overhead.
*/
distance += l2arc_log_blk_overhead(distance, dev);
if (vd->vdev_has_trim && l2arc_trim_ahead > 0) {
/*
* Trim ahead of the write size 64MB or (l2arc_trim_ahead/100)
* times the write size, whichever is greater.
*/
distance += MAX(64 * 1024 * 1024,
(distance * l2arc_trim_ahead) / 100);
}
top:
rerun = B_FALSE;
if (dev->l2ad_hand >= (dev->l2ad_end - distance)) {
/*
* When there is no space to accommodate upcoming writes,
* evict to the end. Then bump the write and evict hands
* to the start and iterate. This iteration does not
* happen indefinitely as we make sure in
* l2arc_write_size() that when the write hand is reset,
* the write size does not exceed the end of the device.
*/
rerun = B_TRUE;
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
if (!all) {
/*
* This check has to be placed after deciding whether to
* iterate (rerun).
*/
if (dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict. We have already trimmed the
* whole device.
*/
goto out;
} else {
/*
* Trim the space to be evicted.
*/
if (vd->vdev_has_trim && dev->l2ad_evict < taddr &&
l2arc_trim_ahead > 0) {
/*
* We have to drop the spa_config lock because
* vdev_trim_range() will acquire it.
* l2ad_evict already accounts for the label
* size. To prevent vdev_trim_ranges() from
* adding it again, we subtract it from
* l2ad_evict.
*/
spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
vdev_trim_simple(vd,
dev->l2ad_evict - VDEV_LABEL_START_SIZE,
taddr - dev->l2ad_evict);
spa_config_enter(dev->l2ad_spa, SCL_L2ARC, dev,
RW_READER);
}
/*
* When rebuilding L2ARC we retrieve the evict hand
* from the header of the device. Of note, l2arc_evict()
* does not actually delete buffers from the cache
* device, but trimming may do so depending on the
* hardware implementation. Thus keeping track of the
* evict hand is useful.
*/
dev->l2ad_evict = MAX(dev->l2ad_evict, taddr);
}
}
retry:
mutex_enter(&dev->l2ad_mtx);
/*
* We have to account for evicted log blocks. Run vdev_space_update()
* on log blocks whose offset (in bytes) is before the evicted offset
* (in bytes) by searching in the list of pointers to log blocks
* present in the L2ARC device.
*/
for (lb_ptr_buf = list_tail(&dev->l2ad_lbptr_list); lb_ptr_buf;
lb_ptr_buf = lb_ptr_buf_prev) {
lb_ptr_buf_prev = list_prev(&dev->l2ad_lbptr_list, lb_ptr_buf);
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE(
(lb_ptr_buf->lb_ptr)->lbp_prop);
/*
* We don't worry about log blocks left behind (ie
* lbp_payload_start < l2ad_hand) because l2arc_write_buffers()
* will never write more than l2arc_evict() evicts.
*/
if (!all && l2arc_log_blkptr_valid(dev, lb_ptr_buf->lb_ptr)) {
break;
} else {
vdev_space_update(vd, -asize, 0, 0);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, -asize);
ARCSTAT_BUMPDOWN(arcstat_l2_log_blk_count);
zfs_refcount_remove_many(&dev->l2ad_lb_asize, asize,
lb_ptr_buf);
zfs_refcount_remove(&dev->l2ad_lb_count, lb_ptr_buf);
list_remove(&dev->l2ad_lbptr_list, lb_ptr_buf);
kmem_free(lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
kmem_free(lb_ptr_buf, sizeof (l2arc_lb_ptr_buf_t));
}
}
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
ASSERT(!HDR_EMPTY(hdr));
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto retry;
}
/*
* A header can't be on this list if it doesn't have L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= dev->l2ad_evict ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
out:
/*
* We need to check whether we are evicting all buffers; otherwise we may
* iterate unnecessarily.
*/
if (!all && rerun) {
/*
* Bump device hand to the device start if it is approaching the
* end. l2arc_evict() has already evicted ahead for this case.
*/
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_evict = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
goto top;
}
if (!all) {
/*
* In case of cache device removal (all) the following
* assertions may be violated without functional consequences
* as the device is about to be removed.
*/
ASSERT3U(dev->l2ad_hand + distance, <, dev->l2ad_end);
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <, dev->l2ad_evict);
}
}
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function will always return an abd with the data
* transformed as it is on disk in a new abd of asize bytes.
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
void *tmp = NULL;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we allocate it
* and copy the data. This may be done to eliminate a dependency on a
* shared buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr) && asize != psize) {
ASSERT3U(asize, >=, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto out;
}
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
cabd = abd_alloc_for_io(asize, ismd);
tmp = abd_borrow_buf(cabd, asize);
psize = zio_compress_data(compress, to_write, tmp, size,
hdr->b_complevel);
if (psize >= size) {
abd_return_buf(cabd, tmp, asize);
HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
to_write = cabd;
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto encrypt;
}
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
bzero((char *)tmp + psize, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, asize);
to_write = cabd;
}
encrypt:
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_ot, bswap, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv, mac, psize, to_write, eabd,
&no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
static void
l2arc_blk_fetch_done(zio_t *zio)
{
l2arc_read_callback_t *cb;
cb = zio->io_private;
if (cb->l2rcb_abd != NULL)
abd_free(cb->l2rcb_abd);
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The headroom scanned ahead of the write hand is multiplied by
* l2arc_headroom_boost when compressed ARC is enabled, to account for
* compression reducing the amount of data actually written.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment and the
* writing of log blocks).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
uint64_t write_asize, write_psize, write_lsize, headroom;
boolean_t full;
l2arc_write_callback_t *cb = NULL;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_lsize = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
/*
* Copy buffers for L2ARC writing.
*/
for (int pass = 0; pass < L2ARC_FEED_TYPES; pass++) {
/*
* If pass == 1 or 3, we cache MRU metadata and data
* respectively.
*/
if (l2arc_mfuonly) {
if (pass == 1 || pass == 3)
continue;
}
multilist_sublist_t *mls = l2arc_sublist_lock(pass);
uint64_t passed_sz = 0;
VERIFY3P(mls, !=, NULL);
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
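/*
* Illustration (hypothetical values): with target_sz = 8 MiB,
* l2arc_headroom = 2 and l2arc_headroom_boost = 200, headroom is
* 16 MiB, boosted to 32 MiB when compressed ARC is enabled.
*/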
for (; hdr; hdr = hdr_prev) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
if (arc_warm == B_FALSE)
hdr_prev = multilist_sublist_next(mls, hdr);
else
hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (l2arc_headroom != 0 && passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
if ((write_asize + asize) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
ASSERT3U(arc_hdr_size(hdr), >, 0);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
continue;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
/*
* Create a list to save allocated abd buffers
* for l2arc_log_blk_commit().
*/
list_create(&cb->l2wcb_abd_list,
sizeof (l2arc_lb_abd_buf_t),
offsetof(l2arc_lb_abd_buf_t, node));
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
hdr->b_l2hdr.b_arcs_state =
hdr->b_l1hdr.b_state->arcs_state;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
wzio = zio_write_phys(pio, dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
write_lsize += HDR_GET_LSIZE(hdr);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_exit(hash_lock);
/*
* Append buf info to current log and commit if full.
* arcstat_l2_{size,asize} kstats are updated
* internally.
*/
if (l2arc_log_blk_insert(dev, hdr))
l2arc_log_blk_commit(dev, pio, cb);
zio_nowait(wzio);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_lsize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
/*
* Although we did not write any buffers, l2ad_evict may
* have advanced.
*/
if (dev->l2ad_evict != l2dhdr->dh_evict)
l2arc_dev_hdr_update(dev);
return (0);
}
if (!dev->l2ad_first)
ASSERT3U(dev->l2ad_hand, <=, dev->l2ad_evict);
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
/*
* Update the device header after the zio completes as
* l2arc_write_done() may have updated the memory holding the log block
* pointers in the device header.
*/
l2arc_dev_hdr_update(dev);
return (write_asize);
}
static boolean_t
l2arc_hdr_limit_reached(void)
{
int64_t s = aggsum_upper_bound(&arc_sums.arcstat_l2_hdr_size);
return (arc_reclaim_needed() || (s > arc_meta_limit * 3 / 4) ||
(s > (arc_warm ? arc_c : arc_c_max) * l2arc_meta_percent / 100));
}
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
/* ARGSUSED */
static void
l2arc_feed_thread(void *unused)
{
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_idle(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size(dev);
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
return (l2arc_vdev_get(vd) != NULL);
}
/*
* Returns the l2arc_dev_t associated with a particular vdev_t or NULL if
* the vdev_t isn't an L2ARC device.
*/
l2arc_dev_t *
l2arc_vdev_get(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev);
}
static void
l2arc_rebuild_dev(l2arc_dev_t *dev, boolean_t reopen)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
spa_t *spa = dev->l2ad_spa;
/*
* The L2ARC has to hold at least the payload of one log block for its
* contents to be restored (persistent L2ARC). The payload of a log block
* depends on the number of its log entries. We always write log blocks
* with 1022 entries. How many of them are committed or restored depends
* on the size of the L2ARC device. Thus the maximum payload of
* one log block is 1022 * SPA_MAXBLOCKSIZE = 16GB. If the L2ARC device
* is less than that, we reduce the amount of committed and restored
* log entries per block so as to enable persistence.
*/
if (dev->l2ad_end < l2arc_rebuild_blocks_min_l2size) {
dev->l2ad_log_entries = 0;
} else {
dev->l2ad_log_entries = MIN((dev->l2ad_end -
dev->l2ad_start) >> SPA_MAXBLOCKSHIFT,
L2ARC_LOG_BLK_MAX_ENTRIES);
}
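/*
* Illustration: assuming SPA_MAXBLOCKSHIFT of 24 (16 MiB blocks), a
* hypothetical 8 GiB cache device gets l2ad_log_entries =
* MIN(8 GiB >> 24, 1022) = 512 entries per log block.
*/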
/*
* Read the device header; if an error is returned, do not rebuild the L2ARC.
*/
if (l2arc_dev_hdr_read(dev) == 0 && dev->l2ad_log_entries > 0) {
/*
* If we are onlining a cache device (vdev_reopen) that was
* still present (l2arc_vdev_present()) and rebuild is enabled,
* we should evict all ARC buffers and pointers to log blocks
* and reclaim their space before restoring its contents to
* L2ARC.
*/
if (reopen) {
if (!l2arc_rebuild_enabled) {
return;
} else {
l2arc_evict(dev, 0, B_TRUE);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
}
/*
* Just mark the device as pending for a rebuild. We won't
* be starting a rebuild in line here as it would block pool
* import. Instead spa_load_impl will hand that off to an
* async task which will call l2arc_spa_rebuild_start.
*/
dev->l2ad_rebuild = B_TRUE;
} else if (spa_writeable(spa)) {
/*
* In this case TRIM the whole device if l2arc_trim_ahead > 0,
* otherwise create a new header. We zero out the memory holding
* the header to reset dh_start_lbps. If we TRIM the whole
* device the new header will be written by
* vdev_trim_l2arc_thread() at the end of the TRIM to update the
* trim_state in the header too. When reading the header, if
* trim_state is not VDEV_TRIM_COMPLETE and l2arc_trim_ahead > 0
* we opt to TRIM the whole device again.
*/
if (l2arc_trim_ahead > 0) {
dev->l2ad_trim_all = B_TRUE;
} else {
bzero(l2dhdr, l2dhdr_asize);
l2arc_dev_hdr_update(dev);
}
}
}
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
uint64_t l2dhdr_asize;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = vmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
/* leave extra size for an l2arc device header */
l2dhdr_asize = adddev->l2ad_dev_hdr_asize =
MAX(sizeof (*adddev->l2ad_dev_hdr), 1 << vd->vdev_ashift);
adddev->l2ad_start = VDEV_LABEL_START_SIZE + l2dhdr_asize;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
ASSERT3U(adddev->l2ad_start, <, adddev->l2ad_end);
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_evict = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
adddev->l2ad_trim_all = B_FALSE;
list_link_init(&adddev->l2ad_node);
adddev->l2ad_dev_hdr = kmem_zalloc(l2dhdr_asize, KM_SLEEP);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
/*
* This is a list of pointers to log blocks that are still present
* on the device.
*/
list_create(&adddev->l2ad_lbptr_list, sizeof (l2arc_lb_ptr_buf_t),
offsetof(l2arc_lb_ptr_buf_t, node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
zfs_refcount_create(&adddev->l2ad_alloc);
zfs_refcount_create(&adddev->l2ad_lb_asize);
zfs_refcount_create(&adddev->l2ad_lb_count);
/*
* Decide if dev is eligible for L2ARC rebuild or whole device
* trimming. This has to happen before the device is added in the
* cache device list and l2arc_dev_mtx is released. Otherwise
* l2arc_feed_thread() might already start writing on the
* device.
*/
l2arc_rebuild_dev(adddev, B_FALSE);
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
}
/*
* Decide if a vdev is eligible for L2ARC rebuild, called from vdev_reopen()
* in case of onlining a cache device.
*/
void
l2arc_rebuild_vdev(vdev_t *vd, boolean_t reopen)
{
l2arc_dev_t *dev = NULL;
dev = l2arc_vdev_get(vd);
ASSERT3P(dev, !=, NULL);
/*
* In contrast to l2arc_add_vdev() we do not have to worry about
* l2arc_feed_thread() invalidating previous content when onlining a
* cache device. The device parameters (l2ad*) are not cleared when
* offlining the device and writing new buffers will not invalidate
* all previous content. In the worst case, only buffers that have not had
* their log block written to the device will be lost.
* When onlining the cache device (i.e. offline->online without exporting
* the pool in between) this happens:
* vdev_reopen() -> vdev_open() -> l2arc_rebuild_vdev()
* | |
* vdev_is_dead() = B_FALSE l2ad_rebuild = B_TRUE
* During the time where vdev_is_dead = B_FALSE and until l2ad_rebuild
* is set to B_TRUE we might write additional buffers to the device.
*/
l2arc_rebuild_dev(dev, reopen);
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
l2arc_dev_t *remdev = NULL;
/*
* Find the device by vdev
*/
remdev = l2arc_vdev_get(vd);
ASSERT3P(remdev, !=, NULL);
/*
* Cancel any ongoing or scheduled rebuild.
*/
mutex_enter(&l2arc_rebuild_thr_lock);
if (remdev->l2ad_rebuild_began == B_TRUE) {
remdev->l2ad_rebuild_cancel = B_TRUE;
while (remdev->l2ad_rebuild == B_TRUE)
cv_wait(&l2arc_rebuild_thr_cv, &l2arc_rebuild_thr_lock);
}
mutex_exit(&l2arc_rebuild_thr_lock);
/*
* Remove device from global list
*/
mutex_enter(&l2arc_dev_mtx);
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
ASSERT(list_is_empty(&remdev->l2ad_lbptr_list));
list_destroy(&remdev->l2ad_lbptr_list);
mutex_destroy(&remdev->l2ad_mtx);
zfs_refcount_destroy(&remdev->l2ad_alloc);
zfs_refcount_destroy(&remdev->l2ad_lb_asize);
zfs_refcount_destroy(&remdev->l2ad_lb_count);
kmem_free(remdev->l2ad_dev_hdr, remdev->l2ad_dev_hdr_asize);
vmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_rebuild_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_rebuild_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_rebuild_thr_lock);
cv_destroy(&l2arc_rebuild_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & SPA_MODE_WRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
/*
* Punches out rebuild threads for the L2ARC devices in a spa. This should
* be called after pool import from the spa async thread, since starting
* these threads directly from spa_import() will make them part of the
* "zpool import" context and delay process exit (and thus pool import).
*/
void
l2arc_spa_rebuild_start(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Locate the spa's l2arc devices and kick off rebuild threads.
*/
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
l2arc_dev_t *dev =
l2arc_vdev_get(spa->spa_l2cache.sav_vdevs[i]);
if (dev == NULL) {
/* Don't attempt a rebuild if the vdev is UNAVAIL */
continue;
}
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild && !dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild_began = B_TRUE;
(void) thread_create(NULL, 0, l2arc_dev_rebuild_thread,
dev, 0, &p0, TS_RUN, minclsyspri);
}
mutex_exit(&l2arc_rebuild_thr_lock);
}
}
/*
* Main entry point for L2ARC rebuilding.
*/
static void
l2arc_dev_rebuild_thread(void *arg)
{
l2arc_dev_t *dev = arg;
VERIFY(!dev->l2ad_rebuild_cancel);
VERIFY(dev->l2ad_rebuild);
(void) l2arc_rebuild(dev);
mutex_enter(&l2arc_rebuild_thr_lock);
dev->l2ad_rebuild_began = B_FALSE;
dev->l2ad_rebuild = B_FALSE;
mutex_exit(&l2arc_rebuild_thr_lock);
thread_exit();
}
/*
* This function implements the actual L2ARC metadata rebuild. It starts
* reading the log block chain and restores each block's contents to
* memory (reconstructing arc_buf_hdr_t's).
*
* Operation stops under any of the following conditions:
*
* 1) We reach the end of the log block chain.
* 2) We encounter *any* error condition (cksum errors, io errors)
*/
static int
l2arc_rebuild(l2arc_dev_t *dev)
{
vdev_t *vd = dev->l2ad_vdev;
spa_t *spa = vd->vdev_spa;
int err = 0;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
l2arc_log_blk_phys_t *this_lb, *next_lb;
zio_t *this_io = NULL, *next_io = NULL;
l2arc_log_blkptr_t lbps[2];
l2arc_lb_ptr_buf_t *lb_ptr_buf;
boolean_t lock_held;
this_lb = vmem_zalloc(sizeof (*this_lb), KM_SLEEP);
next_lb = vmem_zalloc(sizeof (*next_lb), KM_SLEEP);
/*
* We prevent device removal while issuing reads to the device,
* then during the rebuilding phases we drop this lock again so
* that a spa_unload or device remove can be initiated - this is
* safe, because the spa will signal us to stop before removing
* our device and wait for us to stop.
*/
spa_config_enter(spa, SCL_L2ARC, vd, RW_READER);
lock_held = B_TRUE;
/*
* Retrieve the persistent L2ARC device state.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
dev->l2ad_evict = MAX(l2dhdr->dh_evict, dev->l2ad_start);
dev->l2ad_hand = MAX(l2dhdr->dh_start_lbps[0].lbp_daddr +
L2BLK_GET_PSIZE((&l2dhdr->dh_start_lbps[0])->lbp_prop),
dev->l2ad_start);
dev->l2ad_first = !!(l2dhdr->dh_flags & L2ARC_DEV_HDR_EVICT_FIRST);
vd->vdev_trim_action_time = l2dhdr->dh_trim_action_time;
vd->vdev_trim_state = l2dhdr->dh_trim_state;
/*
* In case the zfs module parameter l2arc_rebuild_enabled is false
* we do not start the rebuild process.
*/
if (!l2arc_rebuild_enabled)
goto out;
/* Prepare the rebuild process */
bcopy(l2dhdr->dh_start_lbps, lbps, sizeof (lbps));
/* Start the rebuild process */
for (;;) {
if (!l2arc_log_blkptr_valid(dev, &lbps[0]))
break;
if ((err = l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
this_lb, next_lb, this_io, &next_io)) != 0)
goto out;
/*
* Our memory pressure valve. If the system is running low
* on memory, rather than swamping memory with new ARC buf
* hdrs, we opt not to rebuild the L2ARC. At this point,
* however, we have already set up our L2ARC dev to chain in
* new metadata log blocks, so the user may choose to offline/
* online the L2ARC dev at a later time (or re-import the pool)
* to reconstruct it (when there's less memory pressure).
*/
if (l2arc_hdr_limit_reached()) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_lowmem);
cmn_err(CE_NOTE, "System running low on memory, "
"aborting L2ARC rebuild.");
err = SET_ERROR(ENOMEM);
goto out;
}
spa_config_exit(spa, SCL_L2ARC, vd);
lock_held = B_FALSE;
/*
* Now that we know that the next_lb checks out alright, we
* can start reconstruction from this log block.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
uint64_t asize = L2BLK_GET_PSIZE((&lbps[0])->lbp_prop);
l2arc_log_blk_restore(dev, this_lb, asize);
/*
* Log block restored; include its pointer in the list of
* pointers to log blocks present in the L2ARC device.
*/
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t),
KM_SLEEP);
bcopy(&lbps[0], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(vd, asize, 0, 0);
/*
* Protection against loops of log blocks:
*
* l2ad_hand l2ad_evict
* V V
* l2ad_start |=======================================| l2ad_end
* -----|||----|||---|||----|||
* (3) (2) (1) (0)
* ---|||---|||----|||---|||
* (7) (6) (5) (4)
*
* In this situation the pointer of log block (4) passes
* l2arc_log_blkptr_valid() but the log block should not be
* restored as it is overwritten by the payload of log block
* (0). Only log blocks (0)-(3) should be restored. We check
* whether l2ad_evict lies in between the payload starting
* offset of the next log block (lbps[1].lbp_payload_start)
* and the payload starting offset of the present log block
* (lbps[0].lbp_payload_start). If true and this isn't the
* first pass, we are looping from the beginning and we should
* stop.
*/
if (l2arc_range_check_overlap(lbps[1].lbp_payload_start,
lbps[0].lbp_payload_start, dev->l2ad_evict) &&
!dev->l2ad_first)
goto out;
cond_resched();
for (;;) {
mutex_enter(&l2arc_rebuild_thr_lock);
if (dev->l2ad_rebuild_cancel) {
dev->l2ad_rebuild = B_FALSE;
cv_signal(&l2arc_rebuild_thr_cv);
mutex_exit(&l2arc_rebuild_thr_lock);
err = SET_ERROR(ECANCELED);
goto out;
}
mutex_exit(&l2arc_rebuild_thr_lock);
if (spa_config_tryenter(spa, SCL_L2ARC, vd,
RW_READER)) {
lock_held = B_TRUE;
break;
}
/*
* The L2ARC config lock is held by somebody as writer,
* possibly because they are trying to remove us. They
* will likely want us to shut down, so after a little
* delay we check l2ad_rebuild_cancel and retry taking
* the lock.
*/
delay(1);
}
/*
* Continue with the next log block.
*/
lbps[0] = lbps[1];
lbps[1] = this_lb->lb_prev_lbp;
PTR_SWAP(this_lb, next_lb);
this_io = next_io;
next_io = NULL;
}
if (this_io != NULL)
l2arc_log_blk_fetch_abort(this_io);
out:
if (next_io != NULL)
l2arc_log_blk_fetch_abort(next_io);
vmem_free(this_lb, sizeof (*this_lb));
vmem_free(next_lb, sizeof (*next_lb));
if (!l2arc_rebuild_enabled) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"disabled");
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) > 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_success);
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"successful, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err == 0 && zfs_refcount_count(&dev->l2ad_lb_count) == 0) {
/*
* No error but also nothing restored, meaning the lbps array
* in the device header points to invalid/non-present log
* blocks. Reset the header.
*/
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"no valid log blocks");
bzero(l2dhdr, dev->l2ad_dev_hdr_asize);
l2arc_dev_hdr_update(dev);
} else if (err == ECANCELED) {
/*
* In case the rebuild was canceled do not log to spa history
* log as the pool may be in the process of being removed.
*/
zfs_dbgmsg("L2ARC rebuild aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
} else if (err != 0) {
spa_history_log_internal(spa, "L2ARC rebuild", NULL,
"aborted, restored %llu blocks",
(u_longlong_t)zfs_refcount_count(&dev->l2ad_lb_count));
}
if (lock_held)
spa_config_exit(spa, SCL_L2ARC, vd);
return (err);
}
/*
* Attempts to read the device header of the provided L2ARC device and writes
* it to the device's in-core header (l2ad_dev_hdr). On success, this function
* returns 0, otherwise the appropriate error code is returned.
*/
static int
l2arc_dev_hdr_read(l2arc_dev_t *dev)
{
int err;
uint64_t guid;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
guid = spa_guid(dev->l2ad_vdev->vdev_spa);
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_read_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd,
ZIO_CHECKSUM_LABEL, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
ZIO_FLAG_SPECULATIVE, B_FALSE));
abd_free(abd);
if (err != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_dh_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
return (err);
}
if (l2dhdr->dh_magic == BSWAP_64(L2ARC_DEV_HDR_MAGIC))
byteswap_uint64_array(l2dhdr, sizeof (*l2dhdr));
if (l2dhdr->dh_magic != L2ARC_DEV_HDR_MAGIC ||
l2dhdr->dh_spa_guid != guid ||
l2dhdr->dh_vdev_guid != dev->l2ad_vdev->vdev_guid ||
l2dhdr->dh_version != L2ARC_PERSISTENT_VERSION ||
l2dhdr->dh_log_entries != dev->l2ad_log_entries ||
l2dhdr->dh_end != dev->l2ad_end ||
!l2arc_range_check_overlap(dev->l2ad_start, dev->l2ad_end,
l2dhdr->dh_evict) ||
(l2dhdr->dh_trim_state != VDEV_TRIM_COMPLETE &&
l2arc_trim_ahead > 0)) {
/*
* Attempt to rebuild a device containing no actual dev hdr
* or containing a header from some other pool or from another
* version of persistent L2ARC.
*/
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_unsupported);
return (SET_ERROR(ENOTSUP));
}
return (0);
}
/*
* Reads L2ARC log blocks from storage and validates their contents.
*
* This function implements a simple fetcher to make sure that while
* we're processing one buffer the L2ARC is already fetching the next
* one in the chain.
*
* The arguments this_lbp and next_lbp point to the current and next log block
* address in the block chain. Similarly, this_lb and next_lb hold the
* l2arc_log_blk_phys_t's of the current and next L2ARC log blocks.
*
* The `this_io' and `next_io' arguments are used for block fetching.
* When issuing the first blk IO during rebuild, you should pass NULL for
* `this_io'. This function will then issue a sync IO to read the block and
* also issue an async IO to fetch the next block in the block chain. The
* fetched IO is returned in `next_io'. On subsequent calls to this
* function, pass the value returned in `next_io' from the previous call
* as `this_io' and a fresh `next_io' pointer to hold the next fetch IO.
* Prior to the call, you should initialize your `next_io' pointer to be
* NULL. If no fetch IO was issued, the pointer is left set at NULL.
*
* On success, this function returns 0, otherwise it returns an appropriate
* error code. On error the fetching IO is aborted and cleared before
* returning from this function. Therefore, if we return `success', the
* caller can assume that we have taken care of cleanup of fetch IOs.
*/
static int
l2arc_log_blk_read(l2arc_dev_t *dev,
const l2arc_log_blkptr_t *this_lbp, const l2arc_log_blkptr_t *next_lbp,
l2arc_log_blk_phys_t *this_lb, l2arc_log_blk_phys_t *next_lb,
zio_t *this_io, zio_t **next_io)
{
int err = 0;
zio_cksum_t cksum;
abd_t *abd = NULL;
uint64_t asize;
ASSERT(this_lbp != NULL && next_lbp != NULL);
ASSERT(this_lb != NULL && next_lb != NULL);
ASSERT(next_io != NULL && *next_io == NULL);
ASSERT(l2arc_log_blkptr_valid(dev, this_lbp));
/*
* Check to see if we have issued the IO for this log block in a
* previous run. If not, this is the first call, so issue it now.
*/
if (this_io == NULL) {
this_io = l2arc_log_blk_fetch(dev->l2ad_vdev, this_lbp,
this_lb);
}
/*
* Peek to see if we can start issuing the next IO immediately.
*/
if (l2arc_log_blkptr_valid(dev, next_lbp)) {
/*
* Start issuing IO for the next log block early - this
* should help keep the L2ARC device busy while we
* decompress and restore this log block.
*/
*next_io = l2arc_log_blk_fetch(dev->l2ad_vdev, next_lbp,
next_lb);
}
/* Wait for the IO to read this log block to complete */
if ((err = zio_wait(this_io)) != 0) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_io_errors);
zfs_dbgmsg("L2ARC IO error (%d) while reading log block, "
"offset: %llu, vdev guid: %llu", err,
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
goto cleanup;
}
/*
* Make sure the buffer checks out.
* L2BLK_GET_PSIZE returns aligned size for log blocks.
*/
asize = L2BLK_GET_PSIZE((this_lbp)->lbp_prop);
fletcher_4_native(this_lb, asize, NULL, &cksum);
if (!ZIO_CHECKSUM_EQUAL(cksum, this_lbp->lbp_cksum)) {
ARCSTAT_BUMP(arcstat_l2_rebuild_abort_cksum_lb_errors);
zfs_dbgmsg("L2ARC log block cksum failed, offset: %llu, "
"vdev guid: %llu, l2ad_hand: %llu, l2ad_evict: %llu",
(u_longlong_t)this_lbp->lbp_daddr,
(u_longlong_t)dev->l2ad_vdev->vdev_guid,
(u_longlong_t)dev->l2ad_hand,
(u_longlong_t)dev->l2ad_evict);
err = SET_ERROR(ECKSUM);
goto cleanup;
}
/* Now we can take our time decoding this buffer */
switch (L2BLK_GET_COMPRESS((this_lbp)->lbp_prop)) {
case ZIO_COMPRESS_OFF:
break;
case ZIO_COMPRESS_LZ4:
abd = abd_alloc_for_io(asize, B_TRUE);
abd_copy_from_buf_off(abd, this_lb, 0, asize);
if ((err = zio_decompress_data(
L2BLK_GET_COMPRESS((this_lbp)->lbp_prop),
abd, this_lb, asize, sizeof (*this_lb), NULL)) != 0) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
break;
default:
err = SET_ERROR(EINVAL);
goto cleanup;
}
if (this_lb->lb_magic == BSWAP_64(L2ARC_LOG_BLK_MAGIC))
byteswap_uint64_array(this_lb, sizeof (*this_lb));
if (this_lb->lb_magic != L2ARC_LOG_BLK_MAGIC) {
err = SET_ERROR(EINVAL);
goto cleanup;
}
cleanup:
/* Abort an in-flight fetch I/O in case of error */
if (err != 0 && *next_io != NULL) {
l2arc_log_blk_fetch_abort(*next_io);
*next_io = NULL;
}
if (abd != NULL)
abd_free(abd);
return (err);
}
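/*
 * A minimal sketch of the calling pattern described above, mirroring the
 * loop in l2arc_rebuild() (names such as lbps[], this_lb and next_lb are
 * those of that caller and are shown here purely for illustration):
 *
 *	zio_t *this_io = NULL, *next_io = NULL;
 *
 *	for (;;) {
 *		if (l2arc_log_blk_read(dev, &lbps[0], &lbps[1],
 *		    this_lb, next_lb, this_io, &next_io) != 0)
 *			break;
 *		(restore this_lb here)
 *		lbps[0] = lbps[1];
 *		lbps[1] = this_lb->lb_prev_lbp;
 *		PTR_SWAP(this_lb, next_lb);
 *		this_io = next_io;
 *		next_io = NULL;
 *	}
 */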
/*
* Restores the payload of a log block to ARC. This creates empty ARC hdr
* entries which only contain an l2arc hdr, essentially restoring the
* buffers to their L2ARC evicted state. This function also updates space
* usage on the L2ARC vdev to make sure it tracks restored buffers.
*/
static void
l2arc_log_blk_restore(l2arc_dev_t *dev, const l2arc_log_blk_phys_t *lb,
uint64_t lb_asize)
{
uint64_t size = 0, asize = 0;
uint64_t log_entries = dev->l2ad_log_entries;
/*
* Usually arc_adapt() is called only for data, not headers, but
* since we may allocate a significant amount of memory here, let ARC
* grow its arc_c.
*/
arc_adapt(log_entries * HDR_L2ONLY_SIZE, arc_l2c_only);
for (int i = log_entries - 1; i >= 0; i--) {
/*
* Restore goes in the reverse temporal direction to preserve
* correct temporal ordering of buffers in the l2ad_buflist.
* l2arc_hdr_restore also does a list_insert_tail instead of
* list_insert_head on the l2ad_buflist:
*
* LIST l2ad_buflist LIST
* HEAD <------ (time) ------ TAIL
* direction +-----+-----+-----+-----+-----+ direction
* of l2arc <== | buf | buf | buf | buf | buf | ===> of rebuild
* fill +-----+-----+-----+-----+-----+
* ^ ^
* | |
* | |
* l2arc_feed_thread l2arc_rebuild
* will place new bufs here restores bufs here
*
* During l2arc_rebuild() the device is not used by
* l2arc_feed_thread() as dev->l2ad_rebuild is set to true.
*/
size += L2BLK_GET_LSIZE((&lb->lb_entries[i])->le_prop);
asize += vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((&lb->lb_entries[i])->le_prop));
l2arc_hdr_restore(&lb->lb_entries[i], dev);
}
/*
* Record rebuild stats:
* size Logical size of restored buffers in the L2ARC
* asize Aligned size of restored buffers in the L2ARC
*/
ARCSTAT_INCR(arcstat_l2_rebuild_size, size);
ARCSTAT_INCR(arcstat_l2_rebuild_asize, asize);
ARCSTAT_INCR(arcstat_l2_rebuild_bufs, log_entries);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, lb_asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio, asize / lb_asize);
ARCSTAT_BUMP(arcstat_l2_rebuild_log_blks);
}
/*
* Restores a single ARC buf hdr from a log entry. The ARC buffer is put
* into a state indicating that it has been evicted to L2ARC.
*/
static void
l2arc_hdr_restore(const l2arc_log_ent_phys_t *le, l2arc_dev_t *dev)
{
arc_buf_hdr_t *hdr, *exists;
kmutex_t *hash_lock;
arc_buf_contents_t type = L2BLK_GET_TYPE((le)->le_prop);
uint64_t asize;
/*
* Do all the allocation before grabbing any locks, this lets us
* sleep if memory is full and we don't have to deal with failed
* allocations.
*/
hdr = arc_buf_alloc_l2only(L2BLK_GET_LSIZE((le)->le_prop), type,
dev, le->le_dva, le->le_daddr,
L2BLK_GET_PSIZE((le)->le_prop), le->le_birth,
L2BLK_GET_COMPRESS((le)->le_prop), le->le_complevel,
L2BLK_GET_PROTECTED((le)->le_prop),
L2BLK_GET_PREFETCH((le)->le_prop),
L2BLK_GET_STATE((le)->le_prop));
asize = vdev_psize_to_asize(dev->l2ad_vdev,
L2BLK_GET_PSIZE((le)->le_prop));
/*
* vdev_space_update() has to be called before arc_hdr_destroy() to
* avoid underflow since the latter also calls vdev_space_update().
*/
l2arc_hdr_arcstats_increment(hdr);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, hdr);
(void) zfs_refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
mutex_exit(&dev->l2ad_mtx);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists) {
/* Buffer was already cached, no need to restore it. */
arc_hdr_destroy(hdr);
/*
* If the buffer is already cached, check whether it has
* L2ARC metadata. If not, fill it in and update the flag.
* This is important in case of onlining a cache device, since
* we previously evicted all L2ARC metadata from ARC.
*/
if (!HDR_HAS_L2HDR(exists)) {
arc_hdr_set_flags(exists, ARC_FLAG_HAS_L2HDR);
exists->b_l2hdr.b_dev = dev;
exists->b_l2hdr.b_daddr = le->le_daddr;
exists->b_l2hdr.b_arcs_state =
L2BLK_GET_STATE((le)->le_prop);
mutex_enter(&dev->l2ad_mtx);
list_insert_tail(&dev->l2ad_buflist, exists);
(void) zfs_refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(exists), exists);
mutex_exit(&dev->l2ad_mtx);
l2arc_hdr_arcstats_increment(exists);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
}
ARCSTAT_BUMP(arcstat_l2_rebuild_bufs_precached);
}
mutex_exit(hash_lock);
}
/*
* Starts an asynchronous read IO to read a log block. This is used in log
* block reconstruction to start reading the next block before we are done
* decoding and reconstructing the current block, to keep the l2arc device
* nice and hot with read IO to process.
* The returned zio will contain a newly allocated memory buffer for the IO
* data which should then be freed by the caller once the zio is no longer
* needed (i.e. due to it having completed). If you wish to abort this
* zio, you should do so using l2arc_log_blk_fetch_abort, which takes
* care of disposing of the allocated buffers correctly.
*/
static zio_t *
l2arc_log_blk_fetch(vdev_t *vd, const l2arc_log_blkptr_t *lbp,
l2arc_log_blk_phys_t *lb)
{
uint32_t asize;
zio_t *pio;
l2arc_read_callback_t *cb;
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
ASSERT(asize <= sizeof (l2arc_log_blk_phys_t));
cb = kmem_zalloc(sizeof (l2arc_read_callback_t), KM_SLEEP);
cb->l2rcb_abd = abd_get_from_buf(lb, asize);
pio = zio_root(vd->vdev_spa, l2arc_blk_fetch_done, cb,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY);
(void) zio_nowait(zio_read_phys(pio, vd, lbp->lbp_daddr, asize,
cb->l2rcb_abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY, B_FALSE));
return (pio);
}
/*
* Aborts a zio returned from l2arc_log_blk_fetch and frees the data
* buffers allocated for it.
*/
static void
l2arc_log_blk_fetch_abort(zio_t *zio)
{
(void) zio_wait(zio);
}
/*
* Creates a zio to update the device header on an l2arc device.
*/
void
l2arc_dev_hdr_update(l2arc_dev_t *dev)
{
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
const uint64_t l2dhdr_asize = dev->l2ad_dev_hdr_asize;
abd_t *abd;
int err;
VERIFY(spa_config_held(dev->l2ad_spa, SCL_STATE_ALL, RW_READER));
l2dhdr->dh_magic = L2ARC_DEV_HDR_MAGIC;
l2dhdr->dh_version = L2ARC_PERSISTENT_VERSION;
l2dhdr->dh_spa_guid = spa_guid(dev->l2ad_vdev->vdev_spa);
l2dhdr->dh_vdev_guid = dev->l2ad_vdev->vdev_guid;
l2dhdr->dh_log_entries = dev->l2ad_log_entries;
l2dhdr->dh_evict = dev->l2ad_evict;
l2dhdr->dh_start = dev->l2ad_start;
l2dhdr->dh_end = dev->l2ad_end;
l2dhdr->dh_lb_asize = zfs_refcount_count(&dev->l2ad_lb_asize);
l2dhdr->dh_lb_count = zfs_refcount_count(&dev->l2ad_lb_count);
l2dhdr->dh_flags = 0;
l2dhdr->dh_trim_action_time = dev->l2ad_vdev->vdev_trim_action_time;
l2dhdr->dh_trim_state = dev->l2ad_vdev->vdev_trim_state;
if (dev->l2ad_first)
l2dhdr->dh_flags |= L2ARC_DEV_HDR_EVICT_FIRST;
abd = abd_get_from_buf(l2dhdr, l2dhdr_asize);
err = zio_wait(zio_write_phys(NULL, dev->l2ad_vdev,
VDEV_LABEL_START_SIZE, l2dhdr_asize, abd, ZIO_CHECKSUM_LABEL, NULL,
NULL, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE));
abd_free(abd);
if (err != 0) {
zfs_dbgmsg("L2ARC IO error (%d) while writing device header, "
"vdev guid: %llu", err,
(u_longlong_t)dev->l2ad_vdev->vdev_guid);
}
}
/*
* Commits a log block to the L2ARC device. This routine is invoked from
* l2arc_write_buffers when the log block fills up.
* This function allocates some memory to temporarily hold the serialized
* buffer to be written. This is then released in l2arc_write_done.
*/
static void
l2arc_log_blk_commit(l2arc_dev_t *dev, zio_t *pio, l2arc_write_callback_t *cb)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_dev_hdr_phys_t *l2dhdr = dev->l2ad_dev_hdr;
uint64_t psize, asize;
zio_t *wzio;
l2arc_lb_abd_buf_t *abd_buf;
uint8_t *tmpbuf;
l2arc_lb_ptr_buf_t *lb_ptr_buf;
VERIFY3S(dev->l2ad_log_ent_idx, ==, dev->l2ad_log_entries);
tmpbuf = zio_buf_alloc(sizeof (*lb));
abd_buf = zio_buf_alloc(sizeof (*abd_buf));
abd_buf->abd = abd_get_from_buf(lb, sizeof (*lb));
lb_ptr_buf = kmem_zalloc(sizeof (l2arc_lb_ptr_buf_t), KM_SLEEP);
lb_ptr_buf->lb_ptr = kmem_zalloc(sizeof (l2arc_log_blkptr_t), KM_SLEEP);
/* link the buffer into the block chain */
lb->lb_prev_lbp = l2dhdr->dh_start_lbps[1];
lb->lb_magic = L2ARC_LOG_BLK_MAGIC;
/*
* l2arc_log_blk_commit() may be called multiple times during a single
* l2arc_write_buffers() call. Save the allocated abd buffers in a list
* so we can free them in l2arc_write_done() later on.
*/
list_insert_tail(&cb->l2wcb_abd_list, abd_buf);
/* try to compress the buffer */
psize = zio_compress_data(ZIO_COMPRESS_LZ4,
abd_buf->abd, tmpbuf, sizeof (*lb), 0);
/* a log block is never entirely zero */
ASSERT(psize != 0);
asize = vdev_psize_to_asize(dev->l2ad_vdev, psize);
ASSERT(asize <= sizeof (*lb));
/*
* Update the start log block pointer in the device header to point
* to the log block we're about to write.
*/
l2dhdr->dh_start_lbps[1] = l2dhdr->dh_start_lbps[0];
l2dhdr->dh_start_lbps[0].lbp_daddr = dev->l2ad_hand;
l2dhdr->dh_start_lbps[0].lbp_payload_asize =
dev->l2ad_log_blk_payload_asize;
l2dhdr->dh_start_lbps[0].lbp_payload_start =
dev->l2ad_log_blk_payload_start;
L2BLK_SET_LSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, sizeof (*lb));
L2BLK_SET_PSIZE(
(&l2dhdr->dh_start_lbps[0])->lbp_prop, asize);
L2BLK_SET_CHECKSUM(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_CHECKSUM_FLETCHER_4);
if (asize < sizeof (*lb)) {
/* compression succeeded */
bzero(tmpbuf + psize, asize - psize);
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_LZ4);
} else {
/* compression failed */
bcopy(lb, tmpbuf, sizeof (*lb));
L2BLK_SET_COMPRESS(
(&l2dhdr->dh_start_lbps[0])->lbp_prop,
ZIO_COMPRESS_OFF);
}
/* checksum what we're about to write */
fletcher_4_native(tmpbuf, asize, NULL,
&l2dhdr->dh_start_lbps[0].lbp_cksum);
abd_free(abd_buf->abd);
/* perform the write itself */
abd_buf->abd = abd_get_from_buf(tmpbuf, sizeof (*lb));
abd_take_ownership_of_buf(abd_buf->abd, B_TRUE);
wzio = zio_write_phys(pio, dev->l2ad_vdev, dev->l2ad_hand,
asize, abd_buf->abd, ZIO_CHECKSUM_OFF, NULL, NULL,
ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, B_FALSE);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, zio_t *, wzio);
(void) zio_nowait(wzio);
dev->l2ad_hand += asize;
/*
* Include the committed log block's pointer in the list of pointers
* to log blocks present in the L2ARC device.
*/
bcopy(&l2dhdr->dh_start_lbps[0], lb_ptr_buf->lb_ptr,
sizeof (l2arc_log_blkptr_t));
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_lbptr_list, lb_ptr_buf);
ARCSTAT_INCR(arcstat_l2_log_blk_asize, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_count);
zfs_refcount_add_many(&dev->l2ad_lb_asize, asize, lb_ptr_buf);
zfs_refcount_add(&dev->l2ad_lb_count, lb_ptr_buf);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, asize, 0, 0);
/* bump the kstats */
ARCSTAT_INCR(arcstat_l2_write_bytes, asize);
ARCSTAT_BUMP(arcstat_l2_log_blk_writes);
ARCSTAT_F_AVG(arcstat_l2_log_blk_avg_asize, asize);
ARCSTAT_F_AVG(arcstat_l2_data_to_meta_ratio,
dev->l2ad_log_blk_payload_asize / asize);
/* start a new log block */
dev->l2ad_log_ent_idx = 0;
dev->l2ad_log_blk_payload_asize = 0;
dev->l2ad_log_blk_payload_start = 0;
}
/*
* Validates an L2ARC log block address to make sure that it can be read
* from the provided L2ARC device.
*/
boolean_t
l2arc_log_blkptr_valid(l2arc_dev_t *dev, const l2arc_log_blkptr_t *lbp)
{
/* L2BLK_GET_PSIZE returns aligned size for log blocks */
uint64_t asize = L2BLK_GET_PSIZE((lbp)->lbp_prop);
uint64_t end = lbp->lbp_daddr + asize - 1;
uint64_t start = lbp->lbp_payload_start;
boolean_t evicted = B_FALSE;
/*
* A log block is valid if all of the following conditions are true:
* - it fits entirely (including its payload) between l2ad_start and
* l2ad_end
* - it has a valid size
* - neither the log block itself nor part of its payload was evicted
* by l2arc_evict():
*
* l2ad_hand l2ad_evict
* | | lbp_daddr
* | start | | end
* | | | | |
* V V V V V
* l2ad_start ============================================ l2ad_end
* --------------------------||||
* ^ ^
* | log block
* payload
*/
evicted =
l2arc_range_check_overlap(start, end, dev->l2ad_hand) ||
l2arc_range_check_overlap(start, end, dev->l2ad_evict) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, start) ||
l2arc_range_check_overlap(dev->l2ad_hand, dev->l2ad_evict, end);
return (start >= dev->l2ad_start && end <= dev->l2ad_end &&
asize > 0 && asize <= sizeof (l2arc_log_blk_phys_t) &&
(!evicted || dev->l2ad_first));
}
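/*
 * For example, with l2ad_hand == 1 GiB and l2ad_evict == 2 GiB, a log
 * block whose payload starts at 1.5 GiB is rejected (unless l2ad_first
 * is set) because part of its payload lies inside the [l2ad_hand,
 * l2ad_evict] region that has already been reused, while a block whose
 * payload and data sit entirely between 2 GiB and l2ad_end passes the
 * eviction check. The offsets are illustrative only.
 */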
/*
* Inserts ARC buffer header `hdr' into the current L2ARC log block on
* the device. The buffer being inserted must be present in L2ARC.
* Returns B_TRUE if the L2ARC log block is full and needs to be committed
* to L2ARC, or B_FALSE if it still has room for more ARC buffers.
*/
static boolean_t
l2arc_log_blk_insert(l2arc_dev_t *dev, const arc_buf_hdr_t *hdr)
{
l2arc_log_blk_phys_t *lb = &dev->l2ad_log_blk;
l2arc_log_ent_phys_t *le;
if (dev->l2ad_log_entries == 0)
return (B_FALSE);
int index = dev->l2ad_log_ent_idx++;
ASSERT3S(index, <, dev->l2ad_log_entries);
ASSERT(HDR_HAS_L2HDR(hdr));
le = &lb->lb_entries[index];
bzero(le, sizeof (*le));
le->le_dva = hdr->b_dva;
le->le_birth = hdr->b_birth;
le->le_daddr = hdr->b_l2hdr.b_daddr;
if (index == 0)
dev->l2ad_log_blk_payload_start = le->le_daddr;
L2BLK_SET_LSIZE((le)->le_prop, HDR_GET_LSIZE(hdr));
L2BLK_SET_PSIZE((le)->le_prop, HDR_GET_PSIZE(hdr));
L2BLK_SET_COMPRESS((le)->le_prop, HDR_GET_COMPRESS(hdr));
le->le_complevel = hdr->b_complevel;
L2BLK_SET_TYPE((le)->le_prop, hdr->b_type);
L2BLK_SET_PROTECTED((le)->le_prop, !!(HDR_PROTECTED(hdr)));
L2BLK_SET_PREFETCH((le)->le_prop, !!(HDR_PREFETCH(hdr)));
L2BLK_SET_STATE((le)->le_prop, hdr->b_l1hdr.b_state->arcs_state);
dev->l2ad_log_blk_payload_asize += vdev_psize_to_asize(dev->l2ad_vdev,
HDR_GET_PSIZE(hdr));
return (dev->l2ad_log_ent_idx == dev->l2ad_log_entries);
}
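/*
 * For example, with dev->l2ad_log_entries == 1022 (the exact count is
 * derived from the device's log block size; 1022 is illustrative), the
 * first 1021 insertions return B_FALSE, and the 1022nd fills the last
 * entry and returns B_TRUE, prompting the caller in
 * l2arc_write_buffers() to commit the block via l2arc_log_blk_commit().
 */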
/*
* Checks whether a given L2ARC device address sits in a time-sequential
* range. The trick here is that the L2ARC is a rotary buffer, so we can't
* just do a range comparison; we need to handle the situation in which the
* range wraps around the end of the L2ARC device. Arguments:
* bottom -- Lower end of the range to check (written to earlier).
* top -- Upper end of the range to check (written to later).
* check -- The address for which we want to determine if it sits in
* between the top and bottom.
*
* The 3-way conditional below represents the following cases:
*
* bottom < top : Sequentially ordered case:
* <check>--------+-------------------+
* | (overlap here?) |
* L2ARC dev V V
* |---------------<bottom>============<top>--------------|
*
* bottom > top: Looped-around case:
* <check>--------+------------------+
* | (overlap here?) |
* L2ARC dev V V
* |===============<top>---------------<bottom>===========|
* ^ ^
* | (or here?) |
* +---------------+---------<check>
*
* top == bottom : Just a single address comparison.
*/
boolean_t
l2arc_range_check_overlap(uint64_t bottom, uint64_t top, uint64_t check)
{
if (bottom < top)
return (bottom <= check && check <= top);
else if (bottom > top)
return (check <= top || bottom <= check);
else
return (check == top);
}
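/*
 * Worked example of the three cases above (offsets are illustrative):
 *
 *	l2arc_range_check_overlap(100, 900, 500) == B_TRUE   sequential
 *	l2arc_range_check_overlap(100, 900,  50) == B_FALSE  sequential
 *	l2arc_range_check_overlap(900, 100, 950) == B_TRUE   wrapped, bottom <= check
 *	l2arc_range_check_overlap(900, 100,  50) == B_TRUE   wrapped, check <= top
 *	l2arc_range_check_overlap(900, 100, 500) == B_FALSE  wrapped, in the gap
 */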
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min, param_set_arc_min,
param_get_long, ZMOD_RW, "Min arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, max, param_set_arc_max,
param_get_long, ZMOD_RW, "Max arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Metadata limit for arc size");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of arc size for arc meta limit");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, meta_min, param_set_arc_long,
param_get_long, ZMOD_RW, "Min arc metadata");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_prune, INT, ZMOD_RW,
"Meta objects to scan for prune");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_adjust_restarts, INT, ZMOD_RW,
"Limit number of restarts in arc_evict_meta");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, meta_strategy, INT, ZMOD_RW,
"Meta reclaim strategy");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, grow_retry, param_set_arc_int,
param_get_int, ZMOD_RW, "Seconds before growing arc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, p_dampener_disable, INT, ZMOD_RW,
"Disable arc_p adapt dampener");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, shrink_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "log2(fraction of arc to reclaim)");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, pc_percent, UINT, ZMOD_RW,
"Percent of pagecache to reclaim arc to");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, p_min_shift, param_set_arc_int,
param_get_int, ZMOD_RW, "arc_c shift to calc min/max arc_p");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, average_blocksize, INT, ZMOD_RD,
"Target average block size");
ZFS_MODULE_PARAM(zfs, zfs_, compressed_arc_enabled, INT, ZMOD_RW,
"Disable compressed arc buffers");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prefetch_ms, param_set_arc_int,
param_get_int, ZMOD_RW, "Min life of prefetch block in ms");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, min_prescient_prefetch_ms,
param_set_arc_int, param_get_int, ZMOD_RW,
"Min life of prescient prefetched block in ms");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_max, ULONG, ZMOD_RW,
"Max write bytes per interval");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, write_boost, ULONG, ZMOD_RW,
"Extra write bytes during device warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom, ULONG, ZMOD_RW,
"Number of max device writes to precache");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, headroom_boost, ULONG, ZMOD_RW,
"Compressed l2arc_headroom multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, trim_ahead, ULONG, ZMOD_RW,
"TRIM ahead L2ARC write size multiplier");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_secs, ULONG, ZMOD_RW,
"Seconds between L2ARC writing");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_min_ms, ULONG, ZMOD_RW,
"Min feed interval in milliseconds");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, noprefetch, INT, ZMOD_RW,
"Skip caching prefetched buffers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, feed_again, INT, ZMOD_RW,
"Turbo L2ARC warmup");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, norw, INT, ZMOD_RW,
"No reads during writes");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, meta_percent, INT, ZMOD_RW,
"Percent of ARC size allowed for L2ARC-only headers");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_enabled, INT, ZMOD_RW,
"Rebuild the L2ARC when importing a pool");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, rebuild_blocks_min_l2size, ULONG, ZMOD_RW,
"Min size in bytes to write rebuild log blocks in L2ARC");
ZFS_MODULE_PARAM(zfs_l2arc, l2arc_, mfuonly, INT, ZMOD_RW,
"Cache only MFU data from ARC into L2ARC");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, lotsfree_percent, param_set_arc_int,
param_get_int, ZMOD_RW, "System free memory I/O throttle in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, sys_free, param_set_arc_long,
param_get_long, ZMOD_RW, "System free memory target size in bytes");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit, param_set_arc_long,
param_get_long, ZMOD_RW, "Minimum bytes of dnodes in arc");
ZFS_MODULE_PARAM_CALL(zfs_arc, zfs_arc_, dnode_limit_percent,
param_set_arc_long, param_get_long, ZMOD_RW,
"Percent of ARC meta buffers for dnodes");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, dnode_reduce_percent, ULONG, ZMOD_RW,
"Percentage of excess dnodes to try to unpin");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, eviction_pct, INT, ZMOD_RW,
"When full, ARC allocation waits for eviction of this % of alloc size");
ZFS_MODULE_PARAM(zfs_arc, zfs_arc_, evict_batch_limit, INT, ZMOD_RW,
"The number of headers to evict per sublist before moving to the next");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dmu_send.c b/sys/contrib/openzfs/module/zfs/dmu_send.c
index d654382237c0..0658e13c2d25 100644
--- a/sys/contrib/openzfs/module/zfs/dmu_send.c
+++ b/sys/contrib/openzfs/module/zfs/dmu_send.c
@@ -1,3094 +1,3096 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright 2016 RackTop Systems.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/spa_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_znode.h>
#include <zfs_fletcher.h>
#include <sys/avl.h>
#include <sys/ddt.h>
#include <sys/zfs_onexit.h>
#include <sys/dmu_send.h>
#include <sys/dmu_recv.h>
#include <sys/dsl_destroy.h>
#include <sys/blkptr.h>
#include <sys/dsl_bookmark.h>
#include <sys/zfeature.h>
#include <sys/bqueue.h>
#include <sys/zvol.h>
#include <sys/policy.h>
#include <sys/objlist.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/* Set this tunable to TRUE to replace corrupt data with 0x2f5baddb10c */
int zfs_send_corrupt_data = B_FALSE;
/*
* This tunable controls the amount of data (measured in bytes) that will be
* prefetched by zfs send. If the main thread is blocking on reads that haven't
* completed, this variable might need to be increased. If instead the main
* thread is issuing new reads because the prefetches have fallen out of the
* cache, this may need to be decreased.
*/
int zfs_send_queue_length = SPA_MAXBLOCKSIZE;
/*
* This tunable controls the length of the queues that zfs send worker threads
* use to communicate. If the send_main_thread is blocking on these queues,
* this variable may need to be increased. If there is a significant slowdown
* at the start of a send as these threads consume all the available IO
* resources, this variable may need to be decreased.
*/
int zfs_send_no_prefetch_queue_length = 1024 * 1024;
/*
* These tunables control the fill fraction of the queues used by zfs send. The fill
* fraction controls the frequency with which threads have to be cv_signaled.
* If a lot of cpu time is being spent on cv_signal, then these should be tuned
* down. If the queues empty before the signalled thread can catch up, then
* these should be tuned up.
*/
int zfs_send_queue_ff = 20;
int zfs_send_no_prefetch_queue_ff = 20;
/*
* Use this to override the recordsize calculation for fast zfs send estimates.
*/
int zfs_override_estimate_recordsize = 0;
/* Set this tunable to FALSE to disable setting of DRR_FLAG_FREERECORDS */
int zfs_send_set_freerecords_bit = B_TRUE;
/* Set this tunable to FALSE to disable sending unmodified spill blocks. */
int zfs_send_unmodified_spill_blocks = B_TRUE;
static inline boolean_t
overflow_multiply(uint64_t a, uint64_t b, uint64_t *c)
{
uint64_t temp = a * b;
if (b != 0 && temp / b != a)
return (B_FALSE);
*c = temp;
return (B_TRUE);
}
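/*
 * Minimal usage sketch (the variable names are illustrative):
 *
 *	uint64_t bytes;
 *	if (!overflow_multiply(nblocks, blocksize, &bytes))
 *		return (SET_ERROR(EOVERFLOW));
 *
 * overflow_multiply(UINT64_MAX, 2, &bytes) returns B_FALSE because the
 * product wraps, while overflow_multiply(1ULL << 20, 1ULL << 10, &bytes)
 * returns B_TRUE and sets bytes to 1ULL << 30.
 */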
struct send_thread_arg {
bqueue_t q;
objset_t *os; /* Objset to traverse */
uint64_t fromtxg; /* Traverse from this txg */
int flags; /* flags to pass to traverse_dataset */
int error_code;
boolean_t cancel;
zbookmark_phys_t resume;
uint64_t *num_blocks_visited;
};
struct redact_list_thread_arg {
boolean_t cancel;
bqueue_t q;
zbookmark_phys_t resume;
redaction_list_t *rl;
boolean_t mark_redact;
int error_code;
uint64_t *num_blocks_visited;
};
struct send_merge_thread_arg {
bqueue_t q;
objset_t *os;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *redact_arg;
int error;
boolean_t cancel;
};
struct send_range {
boolean_t eos_marker; /* Marks the end of the stream */
uint64_t object;
uint64_t start_blkid;
uint64_t end_blkid;
bqueue_node_t ln;
enum type {DATA, HOLE, OBJECT, OBJECT_RANGE, REDACT,
PREVIOUSLY_REDACTED} type;
union {
struct srd {
dmu_object_type_t obj_type;
uint32_t datablksz; // logical size
uint32_t datasz; // payload size
blkptr_t bp;
arc_buf_t *abuf;
abd_t *abd;
kmutex_t lock;
kcondvar_t cv;
boolean_t io_outstanding;
int io_err;
} data;
struct srh {
uint32_t datablksz;
} hole;
struct sro {
/*
* This is a pointer because embedding it in the
* struct causes these structures to be massively larger
* for all range types; this makes the code much less
* memory efficient.
*/
dnode_phys_t *dnp;
blkptr_t bp;
} object;
struct srr {
uint32_t datablksz;
} redact;
struct sror {
blkptr_t bp;
} object_range;
} sru;
};
/*
* The list of data whose inclusion in a send stream can be pending from
* one call to backup_cb to another. Multiple calls to dump_free(),
* dump_freeobjects(), and dump_redact() can be aggregated into a single
* DRR_FREE, DRR_FREEOBJECTS, or DRR_REDACT replay record.
*/
typedef enum {
PENDING_NONE,
PENDING_FREE,
PENDING_FREEOBJECTS,
PENDING_REDACT
} dmu_pendop_t;
typedef struct dmu_send_cookie {
dmu_replay_record_t *dsc_drr;
dmu_send_outparams_t *dsc_dso;
offset_t *dsc_off;
objset_t *dsc_os;
zio_cksum_t dsc_zc;
uint64_t dsc_toguid;
uint64_t dsc_fromtxg;
int dsc_err;
dmu_pendop_t dsc_pending_op;
uint64_t dsc_featureflags;
uint64_t dsc_last_data_object;
uint64_t dsc_last_data_offset;
uint64_t dsc_resume_object;
uint64_t dsc_resume_offset;
boolean_t dsc_sent_begin;
boolean_t dsc_sent_end;
} dmu_send_cookie_t;
static int do_dump(dmu_send_cookie_t *dscp, struct send_range *range);
static void
range_free(struct send_range *range)
{
if (range->type == OBJECT) {
size_t size = sizeof (dnode_phys_t) *
(range->sru.object.dnp->dn_extra_slots + 1);
kmem_free(range->sru.object.dnp, size);
} else if (range->type == DATA) {
mutex_enter(&range->sru.data.lock);
while (range->sru.data.io_outstanding)
cv_wait(&range->sru.data.cv, &range->sru.data.lock);
if (range->sru.data.abd != NULL)
abd_free(range->sru.data.abd);
if (range->sru.data.abuf != NULL) {
arc_buf_destroy(range->sru.data.abuf,
&range->sru.data.abuf);
}
mutex_exit(&range->sru.data.lock);
cv_destroy(&range->sru.data.cv);
mutex_destroy(&range->sru.data.lock);
}
kmem_free(range, sizeof (*range));
}
/*
* For all record types except BEGIN, fill in the checksum (overlaid in
* drr_u.drr_checksum.drr_checksum). The checksum verifies everything
* up to the start of the checksum itself.
*/
static int
dump_record(dmu_send_cookie_t *dscp, void *payload, int payload_len)
{
dmu_send_outparams_t *dso = dscp->dsc_dso;
ASSERT3U(offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
==, sizeof (dmu_replay_record_t) - sizeof (zio_cksum_t));
(void) fletcher_4_incremental_native(dscp->dsc_drr,
offsetof(dmu_replay_record_t, drr_u.drr_checksum.drr_checksum),
&dscp->dsc_zc);
if (dscp->dsc_drr->drr_type == DRR_BEGIN) {
dscp->dsc_sent_begin = B_TRUE;
} else {
ASSERT(ZIO_CHECKSUM_IS_ZERO(&dscp->dsc_drr->drr_u.
drr_checksum.drr_checksum));
dscp->dsc_drr->drr_u.drr_checksum.drr_checksum = dscp->dsc_zc;
}
if (dscp->dsc_drr->drr_type == DRR_END) {
dscp->dsc_sent_end = B_TRUE;
}
(void) fletcher_4_incremental_native(&dscp->dsc_drr->
drr_u.drr_checksum.drr_checksum,
sizeof (zio_cksum_t), &dscp->dsc_zc);
*dscp->dsc_off += sizeof (dmu_replay_record_t);
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, dscp->dsc_drr,
sizeof (dmu_replay_record_t), dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
if (payload_len != 0) {
*dscp->dsc_off += payload_len;
/*
* payload is null when dso_dryrun == B_TRUE (i.e. when we're
* doing a send size calculation)
*/
if (payload != NULL) {
(void) fletcher_4_incremental_native(
payload, payload_len, &dscp->dsc_zc);
}
/*
* The code does not rely on this (len being a multiple of 8).
* We keep this assertion because of the corresponding assertion
* in receive_read(). Keeping this assertion ensures that we do
* not inadvertently break backwards compatibility (causing the
* assertion in receive_read() to trigger on old software).
*
* Raw sends cannot be received on old software, and so can
* bypass this assertion.
*/
ASSERT((payload_len % 8 == 0) ||
(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW));
dscp->dsc_err = dso->dso_outfunc(dscp->dsc_os, payload,
payload_len, dso->dso_arg);
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
}
return (0);
}
/*
* Fill in the drr_free struct, or perform aggregation if the previous record is
* also a free record, and the two are adjacent.
*
* Note that we send free records even for a full send, because we want to be
* able to receive a full send as a clone, which requires a list of all the free
* and freeobject records that were generated on the source.
*/
static int
dump_free(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_free *drrf = &(dscp->dsc_drr->drr_u.drr_free);
/*
* When we receive a free record, dbuf_free_range() assumes
* that the receiving system doesn't have any dbufs in the range
* being freed. This is always true because there is a one-record
* constraint: we only send one WRITE record for any given
* object,offset. We know that the one-record constraint is
* true because we always send data in increasing order by
* object,offset.
*
* If the increasing-order constraint ever changes, we should find
* another way to assert that the one-record constraint is still
* satisfied.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
/*
* If there is a pending op, but it's not PENDING_FREE, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_FREE records can only be aggregated with
* other DRR_FREE records. DRR_FREEOBJECTS records can only be
* aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREE) {
/*
* Check to see whether this free block can be aggregated
* with the pending one.
*/
if (drrf->drr_object == object && drrf->drr_offset +
drrf->drr_length == offset) {
if (offset + length < offset || length == UINT64_MAX)
drrf->drr_length = UINT64_MAX;
else
drrf->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a FREE record and make it pending */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREE;
drrf->drr_object = object;
drrf->drr_offset = offset;
if (offset + length < offset)
drrf->drr_length = DMU_OBJECT_END;
else
drrf->drr_length = length;
drrf->drr_toguid = dscp->dsc_toguid;
if (length == DMU_OBJECT_END) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
} else {
dscp->dsc_pending_op = PENDING_FREE;
}
return (0);
}
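/*
 * For example, two back-to-back calls
 *
 *	dump_free(dscp, 5,      0, 131072);
 *	dump_free(dscp, 5, 131072, 131072);
 *
 * leave a single pending DRR_FREE record for object 5 covering
 * offsets [0, 262144), because the second range begins exactly where
 * the pending one ends. A call for a different object or for a
 * non-adjacent offset pushes the pending record out first.
 */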
/*
* Fill in the drr_redact struct, or perform aggregation if the previous record
* is also a redaction record, and the two are adjacent.
*/
static int
dump_redact(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
uint64_t length)
{
struct drr_redact *drrr = &dscp->dsc_drr->drr_u.drr_redact;
/*
* If there is a pending op, but it's not PENDING_REDACT, push it out,
* since free block aggregation can only be done for blocks of the
* same type (i.e., DRR_REDACT records can only be aggregated with
* other DRR_REDACT records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_REDACT) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_REDACT) {
/*
* Check to see whether this redacted block can be aggregated
* with the pending one.
*/
if (drrr->drr_object == object && drrr->drr_offset +
drrr->drr_length == offset) {
drrr->drr_length += length;
return (0);
} else {
/* not a continuation. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* create a REDACT record and make it pending */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_REDACT;
drrr->drr_object = object;
drrr->drr_offset = offset;
drrr->drr_length = length;
drrr->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_REDACT;
return (0);
}
static int
dmu_dump_write(dmu_send_cookie_t *dscp, dmu_object_type_t type, uint64_t object,
uint64_t offset, int lsize, int psize, const blkptr_t *bp, void *data)
{
uint64_t payload_size;
boolean_t raw = (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
struct drr_write *drrw = &(dscp->dsc_drr->drr_u.drr_write);
/*
* We send data in increasing object, offset order.
* See comment in dump_free() for details.
*/
ASSERT(object > dscp->dsc_last_data_object ||
(object == dscp->dsc_last_data_object &&
offset > dscp->dsc_last_data_offset));
dscp->dsc_last_data_object = object;
dscp->dsc_last_data_offset = offset + lsize - 1;
/*
* If there is any kind of pending aggregation (currently either
* a grouping of free objects or free blocks), push it out to
* the stream, since aggregation can't be done across operations
* of different types.
*/
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a WRITE record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE;
drrw->drr_object = object;
drrw->drr_type = type;
drrw->drr_offset = offset;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_logical_size = lsize;
/* only set the compression fields if the buf is compressed or raw */
if (raw || lsize != psize) {
ASSERT(raw || dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3S(psize, >, 0);
if (raw) {
ASSERT(BP_IS_PROTECTED(bp));
/*
* This is a raw protected block so we need to pass
* along everything the receiving side will need to
* interpret this block, including the byteswap, salt,
* IV, and MAC.
*/
if (BP_SHOULD_BYTESWAP(bp))
drrw->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drrw->drr_salt,
drrw->drr_iv);
zio_crypt_decode_mac_bp(bp, drrw->drr_mac);
} else {
/* this is a compressed block */
ASSERT(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_COMPRESSED);
ASSERT(!BP_SHOULD_BYTESWAP(bp));
ASSERT(!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)));
ASSERT3U(BP_GET_COMPRESS(bp), !=, ZIO_COMPRESS_OFF);
ASSERT3S(lsize, >=, psize);
}
/* set fields common to compressed and raw sends */
drrw->drr_compressiontype = BP_GET_COMPRESS(bp);
drrw->drr_compressed_size = psize;
payload_size = drrw->drr_compressed_size;
} else {
payload_size = drrw->drr_logical_size;
}
if (bp == NULL || BP_IS_EMBEDDED(bp) || (BP_IS_PROTECTED(bp) && !raw)) {
/*
* There's no pre-computed checksum for partial-block writes,
* embedded BP's, or encrypted BP's that are being sent as
* plaintext, so (like fletcher4-checksummed blocks) userland
* will have to compute a dedup-capable checksum itself.
*/
drrw->drr_checksumtype = ZIO_CHECKSUM_OFF;
} else {
drrw->drr_checksumtype = BP_GET_CHECKSUM(bp);
if (zio_checksum_table[drrw->drr_checksumtype].ci_flags &
ZCHECKSUM_FLAG_DEDUP)
drrw->drr_flags |= DRR_CHECKSUM_DEDUP;
DDK_SET_LSIZE(&drrw->drr_key, BP_GET_LSIZE(bp));
DDK_SET_PSIZE(&drrw->drr_key, BP_GET_PSIZE(bp));
DDK_SET_COMPRESS(&drrw->drr_key, BP_GET_COMPRESS(bp));
DDK_SET_CRYPT(&drrw->drr_key, BP_IS_PROTECTED(bp));
drrw->drr_key.ddk_cksum = bp->blk_cksum;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_write_embedded(dmu_send_cookie_t *dscp, uint64_t object, uint64_t offset,
int blksz, const blkptr_t *bp)
{
char buf[BPE_PAYLOAD_SIZE];
struct drr_write_embedded *drrw =
&(dscp->dsc_drr->drr_u.drr_write_embedded);
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
ASSERT(BP_IS_EMBEDDED(bp));
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_WRITE_EMBEDDED;
drrw->drr_object = object;
drrw->drr_offset = offset;
drrw->drr_length = blksz;
drrw->drr_toguid = dscp->dsc_toguid;
drrw->drr_compression = BP_GET_COMPRESS(bp);
drrw->drr_etype = BPE_GET_ETYPE(bp);
drrw->drr_lsize = BPE_GET_LSIZE(bp);
drrw->drr_psize = BPE_GET_PSIZE(bp);
decode_embedded_bp_compressed(bp, buf);
if (dump_record(dscp, buf, P2ROUNDUP(drrw->drr_psize, 8)) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_spill(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
void *data)
{
struct drr_spill *drrs = &(dscp->dsc_drr->drr_u.drr_spill);
uint64_t blksz = BP_GET_LSIZE(bp);
uint64_t payload_size = blksz;
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write a SPILL record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_SPILL;
drrs->drr_object = object;
drrs->drr_length = blksz;
drrs->drr_toguid = dscp->dsc_toguid;
/* See comment in dump_dnode() for full details */
if (zfs_send_unmodified_spill_blocks &&
(bp->blk_birth <= dscp->dsc_fromtxg)) {
drrs->drr_flags |= DRR_SPILL_UNMODIFIED;
}
/* handle raw send fields */
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drrs->drr_flags |= DRR_RAW_BYTESWAP;
drrs->drr_compressiontype = BP_GET_COMPRESS(bp);
drrs->drr_compressed_size = BP_GET_PSIZE(bp);
zio_crypt_decode_params_bp(bp, drrs->drr_salt, drrs->drr_iv);
zio_crypt_decode_mac_bp(bp, drrs->drr_mac);
payload_size = drrs->drr_compressed_size;
}
if (dump_record(dscp, data, payload_size) != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_freeobjects(dmu_send_cookie_t *dscp, uint64_t firstobj, uint64_t numobjs)
{
struct drr_freeobjects *drrfo = &(dscp->dsc_drr->drr_u.drr_freeobjects);
uint64_t maxobj = DNODES_PER_BLOCK *
(DMU_META_DNODE(dscp->dsc_os)->dn_maxblkid + 1);
/*
* ZoL < 0.7 does not handle large FREEOBJECTS records correctly,
* leading to zfs recv never completing. To avoid this issue, don't
* send FREEOBJECTS records for object IDs which cannot exist on the
* receiving side.
*/
if (maxobj > 0) {
if (maxobj <= firstobj)
return (0);
if (maxobj < firstobj + numobjs)
numobjs = maxobj - firstobj;
}
/*
* If there is a pending op, but it's not PENDING_FREEOBJECTS,
* push it out, since free block aggregation can only be done for
* blocks of the same type (i.e., DRR_FREE records can only be
* aggregated with other DRR_FREE records. DRR_FREEOBJECTS records
* can only be aggregated with other DRR_FREEOBJECTS records).
*/
if (dscp->dsc_pending_op != PENDING_NONE &&
dscp->dsc_pending_op != PENDING_FREEOBJECTS) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
if (dscp->dsc_pending_op == PENDING_FREEOBJECTS) {
/*
* See whether this free object array can be aggregated
* with the pending one
*/
if (drrfo->drr_firstobj + drrfo->drr_numobjs == firstobj) {
drrfo->drr_numobjs += numobjs;
return (0);
} else {
/* can't be aggregated. Push out pending record */
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
}
/* write a FREEOBJECTS record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_FREEOBJECTS;
drrfo->drr_firstobj = firstobj;
drrfo->drr_numobjs = numobjs;
drrfo->drr_toguid = dscp->dsc_toguid;
dscp->dsc_pending_op = PENDING_FREEOBJECTS;
return (0);
}
static int
dump_dnode(dmu_send_cookie_t *dscp, const blkptr_t *bp, uint64_t object,
dnode_phys_t *dnp)
{
struct drr_object *drro = &(dscp->dsc_drr->drr_u.drr_object);
int bonuslen;
if (object < dscp->dsc_resume_object) {
/*
* Note: when resuming, we will visit all the dnodes in
* the block of dnodes that we are resuming from. In
* this case it's unnecessary to send the dnodes prior to
* the one we are resuming from. We should be at most one
* block's worth of dnodes behind the resume point.
*/
ASSERT3U(dscp->dsc_resume_object - object, <,
1 << (DNODE_BLOCK_SHIFT - DNODE_SHIFT));
return (0);
}
if (dnp == NULL || dnp->dn_type == DMU_OT_NONE)
return (dump_freeobjects(dscp, object, 1));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
/* write an OBJECT record */
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT;
drro->drr_object = object;
drro->drr_type = dnp->dn_type;
drro->drr_bonustype = dnp->dn_bonustype;
drro->drr_blksz = dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT;
drro->drr_bonuslen = dnp->dn_bonuslen;
drro->drr_dn_slots = dnp->dn_extra_slots + 1;
drro->drr_checksumtype = dnp->dn_checksum;
drro->drr_compress = dnp->dn_compress;
drro->drr_toguid = dscp->dsc_toguid;
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS) &&
drro->drr_blksz > SPA_OLD_MAXBLOCKSIZE)
drro->drr_blksz = SPA_OLD_MAXBLOCKSIZE;
bonuslen = P2ROUNDUP(dnp->dn_bonuslen, 8);
if ((dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
ASSERT(BP_IS_ENCRYPTED(bp));
if (BP_SHOULD_BYTESWAP(bp))
drro->drr_flags |= DRR_RAW_BYTESWAP;
/* needed for reconstructing dnp on recv side */
drro->drr_maxblkid = dnp->dn_maxblkid;
drro->drr_indblkshift = dnp->dn_indblkshift;
drro->drr_nlevels = dnp->dn_nlevels;
drro->drr_nblkptr = dnp->dn_nblkptr;
/*
* Since we encrypt the entire bonus area, the (raw) part
* beyond the bonuslen is actually nonzero, so we need
* to send it.
*/
if (bonuslen != 0) {
drro->drr_raw_bonuslen = DN_MAX_BONUS_LEN(dnp);
bonuslen = drro->drr_raw_bonuslen;
}
}
/*
* DRR_OBJECT_SPILL is set for every dnode which references a
* spill block. This allows the receiving pool to definitively
* determine when a spill block should be kept or freed.
*/
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
drro->drr_flags |= DRR_OBJECT_SPILL;
if (dump_record(dscp, DN_BONUS(dnp), bonuslen) != 0)
return (SET_ERROR(EINTR));
/* Free anything past the end of the file. */
if (dump_free(dscp, object, (dnp->dn_maxblkid + 1) *
(dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT), DMU_OBJECT_END) != 0)
return (SET_ERROR(EINTR));
/*
* Send DRR_SPILL records for unmodified spill blocks. This is useful
* because changing certain attributes of the object (e.g. blocksize)
* can cause old versions of ZFS to incorrectly remove a spill block.
* Including these records in the stream forces an up-to-date version
* to always be written, ensuring they're never lost. Current versions
* of the code which understand the DRR_FLAG_SPILL_BLOCK feature can
* ignore these unmodified spill blocks.
*/
if (zfs_send_unmodified_spill_blocks &&
(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) &&
(DN_SPILL_BLKPTR(dnp)->blk_birth <= dscp->dsc_fromtxg)) {
struct send_range record;
blkptr_t *bp = DN_SPILL_BLKPTR(dnp);
bzero(&record, sizeof (struct send_range));
record.type = DATA;
record.object = object;
record.eos_marker = B_FALSE;
record.start_blkid = DMU_SPILL_BLKID;
record.end_blkid = record.start_blkid + 1;
record.sru.data.bp = *bp;
record.sru.data.obj_type = dnp->dn_type;
record.sru.data.datablksz = BP_GET_LSIZE(bp);
if (do_dump(dscp, &record) != 0)
return (SET_ERROR(EINTR));
}
if (dscp->dsc_err != 0)
return (SET_ERROR(EINTR));
return (0);
}
static int
dump_object_range(dmu_send_cookie_t *dscp, const blkptr_t *bp,
uint64_t firstobj, uint64_t numslots)
{
struct drr_object_range *drror =
&(dscp->dsc_drr->drr_u.drr_object_range);
/* we only use this record type for raw sends */
ASSERT(BP_IS_PROTECTED(bp));
ASSERT(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_DNODE);
ASSERT0(BP_GET_LEVEL(bp));
if (dscp->dsc_pending_op != PENDING_NONE) {
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
dscp->dsc_pending_op = PENDING_NONE;
}
bzero(dscp->dsc_drr, sizeof (dmu_replay_record_t));
dscp->dsc_drr->drr_type = DRR_OBJECT_RANGE;
drror->drr_firstobj = firstobj;
drror->drr_numslots = numslots;
drror->drr_toguid = dscp->dsc_toguid;
if (BP_SHOULD_BYTESWAP(bp))
drror->drr_flags |= DRR_RAW_BYTESWAP;
zio_crypt_decode_params_bp(bp, drror->drr_salt, drror->drr_iv);
zio_crypt_decode_mac_bp(bp, drror->drr_mac);
if (dump_record(dscp, NULL, 0) != 0)
return (SET_ERROR(EINTR));
return (0);
}
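/*
* Decide whether an embedded block pointer can be sent as a
* DRR_WRITE_EMBEDDED record, given the stream's feature flags.
*/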
static boolean_t
send_do_embed(const blkptr_t *bp, uint64_t featureflags)
{
if (!BP_IS_EMBEDDED(bp))
return (B_FALSE);
/*
* Compression function must be legacy, or explicitly enabled.
*/
if ((BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_LEGACY_FUNCTIONS &&
!(featureflags & DMU_BACKUP_FEATURE_LZ4)))
return (B_FALSE);
/*
* If we have not set the ZSTD feature flag, we can't send ZSTD
* compressed embedded blocks, as the receiver may not support them.
*/
if ((BP_GET_COMPRESS(bp) == ZIO_COMPRESS_ZSTD &&
!(featureflags & DMU_BACKUP_FEATURE_ZSTD)))
return (B_FALSE);
/*
* Embed type must be explicitly enabled.
*/
switch (BPE_GET_ETYPE(bp)) {
case BP_EMBEDDED_TYPE_DATA:
if (featureflags & DMU_BACKUP_FEATURE_EMBED_DATA)
return (B_TRUE);
break;
default:
return (B_FALSE);
}
return (B_FALSE);
}
/*
* This function actually handles figuring out what kind of record needs to be
* dumped, and calling the appropriate helper function. In most cases,
* the data has already been read by send_reader_thread().
*/
static int
do_dump(dmu_send_cookie_t *dscp, struct send_range *range)
{
int err = 0;
switch (range->type) {
case OBJECT:
err = dump_dnode(dscp, &range->sru.object.bp, range->object,
range->sru.object.dnp);
return (err);
case OBJECT_RANGE: {
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (!(dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW)) {
return (0);
}
uint64_t epb = BP_GET_LSIZE(&range->sru.object_range.bp) >>
DNODE_SHIFT;
uint64_t firstobj = range->start_blkid * epb;
err = dump_object_range(dscp, &range->sru.object_range.bp,
firstobj, epb);
break;
}
case REDACT: {
struct srr *srrp = &range->sru.redact;
err = dump_redact(dscp, range->object, range->start_blkid *
srrp->datablksz, (range->end_blkid - range->start_blkid) *
srrp->datablksz);
return (err);
}
case DATA: {
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
spa_t *spa =
dmu_objset_spa(dscp->dsc_os);
ASSERT3U(srdp->datablksz, ==, BP_GET_LSIZE(bp));
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
if (BP_GET_TYPE(bp) == DMU_OT_SA) {
arc_flags_t aflags = ARC_FLAG_WAIT;
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
if (dscp->dsc_featureflags & DMU_BACKUP_FEATURE_RAW) {
ASSERT(BP_IS_PROTECTED(bp));
zioflags |= ZIO_FLAG_RAW;
}
zbookmark_phys_t zb;
ASSERT3U(range->start_blkid, ==, DMU_SPILL_BLKID);
zb.zb_objset = dmu_objset_id(dscp->dsc_os);
zb.zb_object = range->object;
zb.zb_level = 0;
zb.zb_blkid = range->start_blkid;
arc_buf_t *abuf = NULL;
if (!dscp->dsc_dso->dso_dryrun && arc_read(NULL, spa,
bp, arc_getbuf_func, &abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb) != 0)
return (SET_ERROR(EIO));
err = dump_spill(dscp, bp, zb.zb_object,
(abuf == NULL ? NULL : abuf->b_data));
if (abuf != NULL)
arc_buf_destroy(abuf, &abuf);
return (err);
}
if (send_do_embed(bp, dscp->dsc_featureflags)) {
err = dump_write_embedded(dscp, range->object,
range->start_blkid * srdp->datablksz,
srdp->datablksz, bp);
return (err);
}
ASSERT(range->object > dscp->dsc_resume_object ||
(range->object == dscp->dsc_resume_object &&
range->start_blkid * srdp->datablksz >=
dscp->dsc_resume_offset));
/* it's a level-0 block of a regular object */
mutex_enter(&srdp->lock);
while (srdp->io_outstanding)
cv_wait(&srdp->cv, &srdp->lock);
err = srdp->io_err;
mutex_exit(&srdp->lock);
if (err != 0) {
if (zfs_send_corrupt_data &&
!dscp->dsc_dso->dso_dryrun) {
/*
* Send a block filled with 0x"zfs badd bloc"
*/
srdp->abuf = arc_alloc_buf(spa, &srdp->abuf,
ARC_BUFC_DATA, srdp->datablksz);
uint64_t *ptr;
for (ptr = srdp->abuf->b_data;
(char *)ptr < (char *)srdp->abuf->b_data +
srdp->datablksz; ptr++)
*ptr = 0x2f5baddb10cULL;
} else {
return (SET_ERROR(EIO));
}
}
ASSERT(dscp->dsc_dso->dso_dryrun ||
srdp->abuf != NULL || srdp->abd != NULL);
uint64_t offset = range->start_blkid * srdp->datablksz;
char *data = NULL;
if (srdp->abd != NULL) {
data = abd_to_buf(srdp->abd);
ASSERT3P(srdp->abuf, ==, NULL);
} else if (srdp->abuf != NULL) {
data = srdp->abuf->b_data;
}
/*
* If we have large blocks stored on disk but the send flags
* don't allow us to send large blocks, we split the data from
* the arc buf into chunks.
*/
if (srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(dscp->dsc_featureflags &
DMU_BACKUP_FEATURE_LARGE_BLOCKS)) {
while (srdp->datablksz > 0 && err == 0) {
int n = MIN(srdp->datablksz,
SPA_OLD_MAXBLOCKSIZE);
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset, n, n, NULL, data);
offset += n;
/*
* When doing dry run, data==NULL is used as a
* sentinel value by
* dmu_dump_write()->dump_record().
*/
if (data != NULL)
data += n;
srdp->datablksz -= n;
}
} else {
err = dmu_dump_write(dscp, srdp->obj_type,
range->object, offset,
srdp->datablksz, srdp->datasz, bp, data);
}
return (err);
}
case HOLE: {
struct srh *srhp = &range->sru.hole;
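/* A hole in the meta-dnode frees a contiguous run of object numbers. */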
if (range->object == DMU_META_DNODE_OBJECT) {
uint32_t span = srhp->datablksz >> DNODE_SHIFT;
uint64_t first_obj = range->start_blkid * span;
uint64_t numobj = range->end_blkid * span - first_obj;
return (dump_freeobjects(dscp, first_obj, numobj));
}
uint64_t offset = 0;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it can never not be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(range->start_blkid, srhp->datablksz,
&offset)) {
return (0);
}
uint64_t len = 0;
if (!overflow_multiply(range->end_blkid, srhp->datablksz, &len))
len = UINT64_MAX;
len = len - offset;
return (dump_free(dscp, range->object, offset, len));
}
default:
panic("Invalid range type in do_dump: %d", range->type);
}
return (err);
}
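/*
* Allocate a send_range record of the given type. DATA ranges also get the
* lock and condition variable used to wait for their asynchronous reads.
*/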
static struct send_range *
range_alloc(enum type type, uint64_t object, uint64_t start_blkid,
uint64_t end_blkid, boolean_t eos)
{
struct send_range *range = kmem_alloc(sizeof (*range), KM_SLEEP);
range->type = type;
range->object = object;
range->start_blkid = start_blkid;
range->end_blkid = end_blkid;
range->eos_marker = eos;
if (type == DATA) {
range->sru.data.abd = NULL;
range->sru.data.abuf = NULL;
mutex_init(&range->sru.data.lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&range->sru.data.cv, NULL, CV_DEFAULT, NULL);
range->sru.data.io_outstanding = 0;
range->sru.data.io_err = 0;
}
return (range);
}
/*
* This is the callback function to traverse_dataset that acts as a worker
* thread for dmu_send_impl.
*/
/*ARGSUSED*/
static int
send_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const struct dnode_phys *dnp, void *arg)
{
struct send_thread_arg *sta = arg;
struct send_range *record;
ASSERT(zb->zb_object == DMU_META_DNODE_OBJECT ||
zb->zb_object >= sta->resume.zb_object);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (sta->os->os_encrypted &&
!BP_IS_HOLE(bp) && !BP_USES_CRYPT(bp)) {
spa_log_error(spa, zb);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(sta->os));
return (SET_ERROR(EIO));
}
if (sta->cancel)
return (SET_ERROR(EINTR));
if (zb->zb_object != DMU_META_DNODE_OBJECT &&
DMU_OBJECT_IS_SPECIAL(zb->zb_object))
return (0);
atomic_inc_64(sta->num_blocks_visited);
if (zb->zb_level == ZB_DNODE_LEVEL) {
if (zb->zb_object == DMU_META_DNODE_OBJECT)
return (0);
record = range_alloc(OBJECT, zb->zb_object, 0, 0, B_FALSE);
record->sru.object.bp = *bp;
size_t size = sizeof (*dnp) * (dnp->dn_extra_slots + 1);
record->sru.object.dnp = kmem_alloc(size, KM_SLEEP);
bcopy(dnp, record->sru.object.dnp, size);
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level == 0 && zb->zb_object == DMU_META_DNODE_OBJECT &&
!BP_IS_HOLE(bp)) {
record = range_alloc(OBJECT_RANGE, 0, zb->zb_blkid,
zb->zb_blkid + 1, B_FALSE);
record->sru.object_range.bp = *bp;
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
if (zb->zb_level < 0 || (zb->zb_level > 0 && !BP_IS_HOLE(bp)))
return (0);
if (zb->zb_object == DMU_META_DNODE_OBJECT && !BP_IS_HOLE(bp))
return (0);
uint64_t span = bp_span_in_blocks(dnp->dn_indblkshift, zb->zb_level);
uint64_t start;
/*
* If this multiply overflows, we don't need to send this block.
* Even if it has a birth time, it can never not be a hole, so
* we don't need to send records for it.
*/
if (!overflow_multiply(span, zb->zb_blkid, &start) || (!(zb->zb_blkid ==
DMU_SPILL_BLKID || DMU_OT_IS_METADATA(dnp->dn_type)) &&
span * zb->zb_blkid > dnp->dn_maxblkid)) {
ASSERT(BP_IS_HOLE(bp));
return (0);
}
if (zb->zb_blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
enum type record_type = DATA;
if (BP_IS_HOLE(bp))
record_type = HOLE;
else if (BP_IS_REDACTED(bp))
record_type = REDACT;
else
record_type = DATA;
record = range_alloc(record_type, zb->zb_object, start,
(start + span < start ? 0 : start + span), B_FALSE);
uint64_t datablksz = (zb->zb_blkid == DMU_SPILL_BLKID ?
BP_GET_LSIZE(bp) : dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
if (BP_IS_HOLE(bp)) {
record->sru.hole.datablksz = datablksz;
} else if (BP_IS_REDACTED(bp)) {
record->sru.redact.datablksz = datablksz;
} else {
record->sru.data.datablksz = datablksz;
record->sru.data.obj_type = dnp->dn_type;
record->sru.data.bp = *bp;
}
bqueue_enqueue(&sta->q, record, sizeof (*record));
return (0);
}
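/* Arguments handed to redact_list_cb() by redact_list_thread(). */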
struct redact_list_cb_arg {
uint64_t *num_blocks_visited;
bqueue_t *q;
boolean_t *cancel;
boolean_t mark_redact;
};
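/*
* Callback for dsl_redaction_list_traverse(); converts each redaction block
* record into a send_range and enqueues it for the merge thread.
*/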
static int
redact_list_cb(redact_block_phys_t *rb, void *arg)
{
struct redact_list_cb_arg *rlcap = arg;
atomic_inc_64(rlcap->num_blocks_visited);
if (*rlcap->cancel)
return (-1);
struct send_range *data = range_alloc(REDACT, rb->rbp_object,
rb->rbp_blkid, rb->rbp_blkid + redact_block_get_count(rb), B_FALSE);
ASSERT3U(data->end_blkid, >, rb->rbp_blkid);
if (rlcap->mark_redact) {
data->type = REDACT;
data->sru.redact.datablksz = redact_block_get_size(rb);
} else {
data->type = PREVIOUSLY_REDACTED;
}
bqueue_enqueue(rlcap->q, data, sizeof (*data));
return (0);
}
/*
* This function kicks off the traverse_dataset. It also handles setting the
* error code of the thread in case something goes wrong, and pushes the End of
* Stream record when the traverse_dataset call has finished.
*/
static void
send_traverse_thread(void *arg)
{
struct send_thread_arg *st_arg = arg;
int err = 0;
struct send_range *data;
fstrans_cookie_t cookie = spl_fstrans_mark();
err = traverse_dataset_resume(st_arg->os->os_dsl_dataset,
st_arg->fromtxg, &st_arg->resume,
st_arg->flags, send_cb, st_arg);
if (err != EINTR)
st_arg->error_code = err;
data = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&st_arg->q, data, sizeof (*data));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Utility function that causes End of Stream records to compare after all
* others, so that other threads' comparison logic can stay simple.
*/
static int __attribute__((unused))
send_range_after(const struct send_range *from, const struct send_range *to)
{
if (from->eos_marker == B_TRUE)
return (1);
if (to->eos_marker == B_TRUE)
return (-1);
uint64_t from_obj = from->object;
uint64_t from_end_obj = from->object + 1;
uint64_t to_obj = to->object;
uint64_t to_end_obj = to->object + 1;
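/*
* Ranges in the meta-dnode (object 0) cover a run of objects; convert their
* block ids into the equivalent object-number range for comparison.
*/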
if (from_obj == 0) {
ASSERT(from->type == HOLE || from->type == OBJECT_RANGE);
from_obj = from->start_blkid << DNODES_PER_BLOCK_SHIFT;
from_end_obj = from->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (to_obj == 0) {
ASSERT(to->type == HOLE || to->type == OBJECT_RANGE);
to_obj = to->start_blkid << DNODES_PER_BLOCK_SHIFT;
to_end_obj = to->end_blkid << DNODES_PER_BLOCK_SHIFT;
}
if (from_end_obj <= to_obj)
return (-1);
if (from_obj >= to_end_obj)
return (1);
int64_t cmp = TREE_CMP(to->type == OBJECT_RANGE, from->type ==
OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(to->type == OBJECT, from->type == OBJECT);
if (unlikely(cmp))
return (cmp);
if (from->end_blkid <= to->start_blkid)
return (-1);
if (from->start_blkid >= to->end_blkid)
return (1);
return (0);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, but do not free the old data. This is used so that the
* records can be sent on to the main thread without copying the data.
*/
static struct send_range *
get_next_range_nofree(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = bqueue_dequeue(bq);
ASSERT3S(send_range_after(prev, next), ==, -1);
return (next);
}
/*
* Pop the new data off the queue, check that the records we receive are in
* the right order, and free the old data.
*/
static struct send_range *
get_next_range(bqueue_t *bq, struct send_range *prev)
{
struct send_range *next = get_next_range_nofree(bq, prev);
range_free(prev);
return (next);
}
static void
redact_list_thread(void *arg)
{
struct redact_list_thread_arg *rlt_arg = arg;
struct send_range *record;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (rlt_arg->rl != NULL) {
struct redact_list_cb_arg rlcba = {0};
rlcba.cancel = &rlt_arg->cancel;
rlcba.q = &rlt_arg->q;
rlcba.num_blocks_visited = rlt_arg->num_blocks_visited;
rlcba.mark_redact = rlt_arg->mark_redact;
int err = dsl_redaction_list_traverse(rlt_arg->rl,
&rlt_arg->resume, redact_list_cb, &rlcba);
if (err != EINTR)
rlt_arg->error_code = err;
}
record = range_alloc(DATA, 0, 0, 0, B_TRUE);
bqueue_enqueue_flush(&rlt_arg->q, record, sizeof (*record));
spl_fstrans_unmark(cookie);
thread_exit();
}
/*
* Compare the start points of the two provided ranges. End of stream ranges
* compare last, and objects compare before any data or hole inside that
* object, as well as before multi-object holes that start at the same object.
*/
static int
send_range_start_compare(struct send_range *r1, struct send_range *r2)
{
uint64_t r1_objequiv = r1->object;
uint64_t r1_l0equiv = r1->start_blkid;
uint64_t r2_objequiv = r2->object;
uint64_t r2_l0equiv = r2->start_blkid;
int64_t cmp = TREE_CMP(r1->eos_marker, r2->eos_marker);
if (unlikely(cmp))
return (cmp);
if (r1->object == 0) {
r1_objequiv = r1->start_blkid * DNODES_PER_BLOCK;
r1_l0equiv = 0;
}
if (r2->object == 0) {
r2_objequiv = r2->start_blkid * DNODES_PER_BLOCK;
r2_l0equiv = 0;
}
cmp = TREE_CMP(r1_objequiv, r2_objequiv);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT_RANGE, r1->type == OBJECT_RANGE);
if (unlikely(cmp))
return (cmp);
cmp = TREE_CMP(r2->type == OBJECT, r1->type == OBJECT);
if (unlikely(cmp))
return (cmp);
return (TREE_CMP(r1_l0equiv, r2_l0equiv));
}
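/*
* Indexes of the queues merged by send_merge_thread(), in descending priority
* order: the redaction list, then the to_ds traversal, then the from bookmark.
*/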
enum q_idx {
REDACT_IDX = 0,
TO_IDX,
FROM_IDX,
NUM_THREADS
};
/*
* This function returns the next range the send_merge_thread should operate on.
* The inputs are two arrays: the first stores the range currently at the front
* of each queue, and the second stores the queues themselves. The ranges are
* sorted in descending
* priority order; the metadata from earlier ranges overrules metadata from
* later ranges. out_mask is used to return which threads the ranges came from;
* bit i is set if ranges[i] started at the same place as the returned range.
*
* This code is not hardcoded to compare a specific number of threads; it could
* be used with any number, just by changing the q_idx enum.
*
* The "next range" is the one with the earliest start; if two starts are equal,
* the highest-priority range is the next to operate on. If a higher-priority
* range starts in the middle of the first range, then the first range will be
* truncated to end where the higher-priority range starts, and we will operate
* on that one next time. In this way, we make sure that each block covered by
* some range gets covered by a returned range, and each block covered is
* returned using the metadata of the highest-priority range it appears in.
*
* For example, if the three ranges at the front of the queues were [2,4),
* [3,5), and [1,3), then the ranges returned would be [1,2) with the metadata
* from the third range, [2,4) with the metadata from the first range, and then
* [4,5) with the metadata from the second.
*/
static struct send_range *
find_next_range(struct send_range **ranges, bqueue_t **qs, uint64_t *out_mask)
{
int idx = 0; // index of the range with the earliest start
int i;
uint64_t bmask = 0;
for (i = 1; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) < 0)
idx = i;
}
if (ranges[idx]->eos_marker) {
struct send_range *ret = range_alloc(DATA, 0, 0, 0, B_TRUE);
*out_mask = 0;
return (ret);
}
/*
* Find all the ranges that start at that same point.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (send_range_start_compare(ranges[i], ranges[idx]) == 0)
bmask |= 1 << i;
}
*out_mask = bmask;
/*
* OBJECT_RANGE records only come from the TO thread, and should always
* be treated as overlapping with nothing and sent on immediately. They
* are only used in raw sends, and are never redacted.
*/
if (ranges[idx]->type == OBJECT_RANGE) {
ASSERT3U(idx, ==, TO_IDX);
ASSERT3U(*out_mask, ==, 1 << TO_IDX);
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Find the first start or end point after the start of the first range.
*/
uint64_t first_change = ranges[idx]->end_blkid;
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || ranges[i]->eos_marker ||
ranges[i]->object > ranges[idx]->object ||
ranges[i]->object == DMU_META_DNODE_OBJECT)
continue;
ASSERT3U(ranges[i]->object, ==, ranges[idx]->object);
if (first_change > ranges[i]->start_blkid &&
(bmask & (1 << i)) == 0)
first_change = ranges[i]->start_blkid;
else if (first_change > ranges[i]->end_blkid)
first_change = ranges[i]->end_blkid;
}
/*
* Update all ranges to no longer overlap with the range we're
* returning. All such ranges must start at the same place as the range
* being returned, and end at or after first_change. Thus we update
* their start to first_change. If that makes them size 0, then free
* them and pull a new range from that thread.
*/
for (i = 0; i < NUM_THREADS; i++) {
if (i == idx || (bmask & (1 << i)) == 0)
continue;
ASSERT3U(first_change, >, ranges[i]->start_blkid);
ranges[i]->start_blkid = first_change;
ASSERT3U(ranges[i]->start_blkid, <=, ranges[i]->end_blkid);
if (ranges[i]->start_blkid == ranges[i]->end_blkid)
ranges[i] = get_next_range(qs[i], ranges[i]);
}
/*
* Short-circuit the simple case; if the range doesn't overlap with
* anything else, or it only overlaps with things that start at the same
* place and are longer, send it on.
*/
if (first_change == ranges[idx]->end_blkid) {
struct send_range *ret = ranges[idx];
ranges[idx] = get_next_range_nofree(qs[idx], ranges[idx]);
return (ret);
}
/*
* Otherwise, return a truncated copy of ranges[idx] and move the start
* of ranges[idx] back to first_change.
*/
struct send_range *ret = kmem_alloc(sizeof (*ret), KM_SLEEP);
*ret = *ranges[idx];
ret->end_blkid = first_change;
ranges[idx]->start_blkid = first_change;
return (ret);
}
#define FROM_AND_REDACT_BITS ((1 << REDACT_IDX) | (1 << FROM_IDX))
/*
* Merge the results from the from thread and the to thread, and then hand the
* records off to send_prefetch_thread to prefetch them. If this is not a
* send from a redaction bookmark, the from thread will push an end of stream
* record and stop, and we'll just send everything that was changed in the
* to_ds since the ancestor's creation txg. If it is, then since
* traverse_dataset has a canonical order, we can compare each change as
* they're pulled off the queues. That will give us a stream that is
* appropriately sorted, and covers all records. In addition, we pull the
* data from the redact_list_thread and use that to determine which blocks
* should be redacted.
*/
static void
send_merge_thread(void *arg)
{
struct send_merge_thread_arg *smt_arg = arg;
struct send_range *front_ranges[NUM_THREADS];
bqueue_t *queues[NUM_THREADS];
int err = 0;
fstrans_cookie_t cookie = spl_fstrans_mark();
if (smt_arg->redact_arg == NULL) {
front_ranges[REDACT_IDX] =
kmem_zalloc(sizeof (struct send_range), KM_SLEEP);
front_ranges[REDACT_IDX]->eos_marker = B_TRUE;
front_ranges[REDACT_IDX]->type = REDACT;
queues[REDACT_IDX] = NULL;
} else {
front_ranges[REDACT_IDX] =
bqueue_dequeue(&smt_arg->redact_arg->q);
queues[REDACT_IDX] = &smt_arg->redact_arg->q;
}
front_ranges[TO_IDX] = bqueue_dequeue(&smt_arg->to_arg->q);
queues[TO_IDX] = &smt_arg->to_arg->q;
front_ranges[FROM_IDX] = bqueue_dequeue(&smt_arg->from_arg->q);
queues[FROM_IDX] = &smt_arg->from_arg->q;
uint64_t mask = 0;
struct send_range *range;
for (range = find_next_range(front_ranges, queues, &mask);
!range->eos_marker && err == 0 && !smt_arg->cancel;
range = find_next_range(front_ranges, queues, &mask)) {
/*
* If the range in question was in both the from redact bookmark
* and the bookmark we're using to redact, then don't send it.
* It's already redacted on the receiving system, so a redaction
* record would be redundant.
*/
if ((mask & FROM_AND_REDACT_BITS) == FROM_AND_REDACT_BITS) {
ASSERT3U(range->type, ==, REDACT);
range_free(range);
continue;
}
bqueue_enqueue(&smt_arg->q, range, sizeof (*range));
if (smt_arg->to_arg->error_code != 0) {
err = smt_arg->to_arg->error_code;
} else if (smt_arg->from_arg->error_code != 0) {
err = smt_arg->from_arg->error_code;
} else if (smt_arg->redact_arg != NULL &&
smt_arg->redact_arg->error_code != 0) {
err = smt_arg->redact_arg->error_code;
}
}
if (smt_arg->cancel && err == 0)
err = SET_ERROR(EINTR);
smt_arg->error = err;
if (smt_arg->error != 0) {
smt_arg->to_arg->cancel = B_TRUE;
smt_arg->from_arg->cancel = B_TRUE;
if (smt_arg->redact_arg != NULL)
smt_arg->redact_arg->cancel = B_TRUE;
}
for (int i = 0; i < NUM_THREADS; i++) {
while (!front_ranges[i]->eos_marker) {
front_ranges[i] = get_next_range(queues[i],
front_ranges[i]);
}
range_free(front_ranges[i]);
}
if (range == NULL)
range = kmem_zalloc(sizeof (*range), KM_SLEEP);
range->eos_marker = B_TRUE;
bqueue_enqueue_flush(&smt_arg->q, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
struct send_reader_thread_arg {
struct send_merge_thread_arg *smta;
bqueue_t q;
boolean_t cancel;
boolean_t issue_reads;
uint64_t featureflags;
int error;
};
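/*
* zio completion callback for reads issued by issue_data_read(); records any
* I/O error and wakes the waiter in do_dump().
*/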
static void
dmu_send_read_done(zio_t *zio)
{
struct send_range *range = zio->io_private;
mutex_enter(&range->sru.data.lock);
if (zio->io_error != 0) {
abd_free(range->sru.data.abd);
range->sru.data.abd = NULL;
range->sru.data.io_err = zio->io_error;
}
ASSERT(range->sru.data.io_outstanding);
range->sru.data.io_outstanding = B_FALSE;
cv_broadcast(&range->sru.data.cv);
mutex_exit(&range->sru.data.lock);
}
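/*
* Compute how the data for a DATA range will be read (raw, compressed, or
* logical) and, unless this is a dry run, fetch it from the ARC if it is
* already cached or start an asynchronous zio read otherwise.
*/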
static void
issue_data_read(struct send_reader_thread_arg *srta, struct send_range *range)
{
struct srd *srdp = &range->sru.data;
blkptr_t *bp = &srdp->bp;
objset_t *os = srta->smta->os;
ASSERT3U(range->type, ==, DATA);
ASSERT3U(range->start_blkid + 1, ==, range->end_blkid);
/*
* If we have large blocks stored on disk but
* the send flags don't allow us to send large
* blocks, we split the data from the arc buf
* into chunks.
*/
boolean_t split_large_blocks =
srdp->datablksz > SPA_OLD_MAXBLOCKSIZE &&
!(srta->featureflags & DMU_BACKUP_FEATURE_LARGE_BLOCKS);
/*
* We should only request compressed data from the ARC if all
* the following are true:
* - stream compression was requested
* - we aren't splitting large blocks into smaller chunks
* - the data won't need to be byteswapped before sending
* - this isn't an embedded block
* - this isn't metadata (if receiving on a different endian
* system it can be byteswapped more easily)
*/
boolean_t request_compressed =
(srta->featureflags & DMU_BACKUP_FEATURE_COMPRESSED) &&
!split_large_blocks && !BP_SHOULD_BYTESWAP(bp) &&
!BP_IS_EMBEDDED(bp) && !DMU_OT_IS_METADATA(BP_GET_TYPE(bp));
enum zio_flag zioflags = ZIO_FLAG_CANFAIL;
if (srta->featureflags & DMU_BACKUP_FEATURE_RAW)
zioflags |= ZIO_FLAG_RAW;
else if (request_compressed)
zioflags |= ZIO_FLAG_RAW_COMPRESS;
srdp->datasz = (zioflags & ZIO_FLAG_RAW_COMPRESS) ?
BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp);
if (!srta->issue_reads)
return;
if (BP_IS_REDACTED(bp))
return;
if (send_do_embed(bp, srta->featureflags))
return;
zbookmark_phys_t zb = {
.zb_objset = dmu_objset_id(os),
.zb_object = range->object,
.zb_level = 0,
.zb_blkid = range->start_blkid,
};
arc_flags_t aflags = ARC_FLAG_CACHED_ONLY;
int arc_err = arc_read(NULL, os->os_spa, bp,
arc_getbuf_func, &srdp->abuf, ZIO_PRIORITY_ASYNC_READ,
zioflags, &aflags, &zb);
/*
* If the data is not already cached in the ARC, we read directly
* from zio. This avoids the performance overhead of adding a new
* entry to the ARC, and we also avoid polluting the ARC cache with
* data that is not likely to be used in the future.
*/
if (arc_err != 0) {
srdp->abd = abd_alloc_linear(srdp->datasz, B_FALSE);
srdp->io_outstanding = B_TRUE;
zio_nowait(zio_read(NULL, os->os_spa, bp, srdp->abd,
srdp->datasz, dmu_send_read_done, range,
ZIO_PRIORITY_ASYNC_READ, zioflags, &zb));
}
}
/*
* Create a new record with the given values.
*/
static void
enqueue_range(struct send_reader_thread_arg *srta, bqueue_t *q, dnode_t *dn,
uint64_t blkid, uint64_t count, const blkptr_t *bp, uint32_t datablksz)
{
enum type range_type = (bp == NULL || BP_IS_HOLE(bp) ? HOLE :
(BP_IS_REDACTED(bp) ? REDACT : DATA));
struct send_range *range = range_alloc(range_type, dn->dn_object,
blkid, blkid + count, B_FALSE);
if (blkid == DMU_SPILL_BLKID)
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_SA);
switch (range_type) {
case HOLE:
range->sru.hole.datablksz = datablksz;
break;
case DATA:
ASSERT3U(count, ==, 1);
range->sru.data.datablksz = datablksz;
range->sru.data.obj_type = dn->dn_type;
range->sru.data.bp = *bp;
issue_data_read(srta, range);
break;
case REDACT:
range->sru.redact.datablksz = datablksz;
break;
default:
break;
}
bqueue_enqueue(q, range, datablksz);
}
/*
* This thread is responsible for two things: First, it retrieves the correct
* blkptr in the to ds if we need to send the data because of something from
* the from thread. As a result of this, we're the first ones to discover that
* some indirect blocks can be discarded because they're not holes. Second,
* it issues prefetches for the data we need to send.
*/
static void
send_reader_thread(void *arg)
{
struct send_reader_thread_arg *srta = arg;
struct send_merge_thread_arg *smta = srta->smta;
bqueue_t *inq = &smta->q;
bqueue_t *outq = &srta->q;
objset_t *os = smta->os;
fstrans_cookie_t cookie = spl_fstrans_mark();
struct send_range *range = bqueue_dequeue(inq);
int err = 0;
/*
* If the record we're analyzing is from a redaction bookmark from the
* fromds, then we need to know whether or not it exists in the tods so
* we know whether to create records for it or not. If it does, we need
* the datablksz so we can generate an appropriate record for it.
* Finally, if it isn't redacted, we need the blkptr so that we can send
* a WRITE record containing the actual data.
*/
uint64_t last_obj = UINT64_MAX;
uint64_t last_obj_exists = B_TRUE;
while (!range->eos_marker && !srta->cancel && smta->error == 0 &&
err == 0) {
switch (range->type) {
case DATA:
issue_data_read(srta, range);
bqueue_enqueue(outq, range, range->sru.data.datablksz);
range = get_next_range_nofree(inq, range);
break;
case HOLE:
case OBJECT:
case OBJECT_RANGE:
case REDACT: // Redacted blocks must exist
bqueue_enqueue(outq, range, sizeof (*range));
range = get_next_range_nofree(inq, range);
break;
case PREVIOUSLY_REDACTED: {
/*
* This entry came from the "from bookmark" when
* sending from a bookmark that has a redaction
* list. We need to check if this object/blkid
* exists in the target ("to") dataset, and if
* not then we drop this entry. We also need
* to fill in the block pointer so that we know
* what to prefetch.
*
* To accomplish the above, we first cache whether or
* not the last object we examined exists. If it
* doesn't, we can drop this record. If it does, we hold
* the dnode and use it to call dbuf_dnode_findbp. We do
* this instead of dbuf_bookmark_findbp because we will
* often operate on large ranges, and holding the dnode
* once is more efficient.
*/
boolean_t object_exists = B_TRUE;
/*
* If the data is redacted, we only care if it exists,
* so that we don't send records for objects that have
* been deleted.
*/
dnode_t *dn;
if (range->object == last_obj && !last_obj_exists) {
/*
* If we're still examining the same object as
* previously, and it doesn't exist, we don't
* need to call dbuf_bookmark_findbp.
*/
object_exists = B_FALSE;
} else {
err = dnode_hold(os, range->object, FTAG, &dn);
if (err == ENOENT) {
object_exists = B_FALSE;
err = 0;
}
last_obj = range->object;
last_obj_exists = object_exists;
}
if (err != 0) {
break;
} else if (!object_exists) {
/*
* The block was modified, but doesn't
* exist in the to dataset; if it was
* deleted in the to dataset, then we'll
* visit the hole bp for it at some point.
*/
range = get_next_range(inq, range);
continue;
}
uint64_t file_max =
(dn->dn_maxblkid < range->end_blkid ?
dn->dn_maxblkid : range->end_blkid);
/*
* The object exists, so we need to try to find the
* blkptr for each block in the range we're processing.
*/
rw_enter(&dn->dn_struct_rwlock, RW_READER);
for (uint64_t blkid = range->start_blkid;
blkid < file_max; blkid++) {
blkptr_t bp;
uint32_t datablksz =
dn->dn_phys->dn_datablkszsec <<
SPA_MINBLOCKSHIFT;
uint64_t offset = blkid * datablksz;
/*
* This call finds the next non-hole block in
* the object. This is to prevent a
* performance problem where we're unredacting
* a large hole. Using dnode_next_offset to
* skip over the large hole avoids iterating
* over every block in it.
*/
err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&offset, 1, 1, 0);
if (err == ESRCH) {
offset = UINT64_MAX;
err = 0;
} else if (err != 0) {
break;
}
if (offset != blkid * datablksz) {
/*
* if there is a hole from here
* (blkid) to offset
*/
offset = MIN(offset, file_max *
datablksz);
uint64_t nblks = (offset / datablksz) -
blkid;
enqueue_range(srta, outq, dn, blkid,
nblks, NULL, datablksz);
blkid += nblks;
}
if (blkid >= file_max)
break;
err = dbuf_dnode_findbp(dn, 0, blkid, &bp,
NULL, NULL);
if (err != 0)
break;
ASSERT(!BP_IS_HOLE(&bp));
enqueue_range(srta, outq, dn, blkid, 1, &bp,
datablksz);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
range = get_next_range(inq, range);
}
}
}
if (srta->cancel || err != 0) {
smta->cancel = B_TRUE;
srta->error = err;
} else if (smta->error != 0) {
srta->error = smta->error;
}
while (!range->eos_marker)
range = get_next_range(inq, range);
bqueue_enqueue_flush(outq, range, 1);
spl_fstrans_unmark(cookie);
thread_exit();
}
#define NUM_SNAPS_NOT_REDACTED UINT64_MAX
struct dmu_send_params {
/* Pool args */
void *tag; // Tag that dp was held with, will be used to release dp.
dsl_pool_t *dp;
/* To snapshot args */
const char *tosnap;
dsl_dataset_t *to_ds;
/* From snapshot args */
zfs_bookmark_phys_t ancestor_zb;
uint64_t *fromredactsnaps;
/* NUM_SNAPS_NOT_REDACTED if not sending from redaction bookmark */
uint64_t numfromredactsnaps;
/* Stream params */
boolean_t is_clone;
boolean_t embedok;
boolean_t large_block_ok;
boolean_t compressok;
boolean_t rawok;
boolean_t savedok;
uint64_t resumeobj;
uint64_t resumeoff;
uint64_t saved_guid;
zfs_bookmark_phys_t *redactbook;
/* Stream output params */
dmu_send_outparams_t *dso;
/* Stream progress params */
offset_t *off;
int outfd;
char saved_toname[MAXNAMELEN];
};
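/*
* Derive the DMU_BACKUP_FEATURE_* flags for this stream from the send options
* and the features active on the source dataset.
*/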
static int
setup_featureflags(struct dmu_send_params *dspp, objset_t *os,
uint64_t *featureflags)
{
dsl_dataset_t *to_ds = dspp->to_ds;
dsl_pool_t *dp = dspp->dp;
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS) {
uint64_t version;
if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &version) != 0)
return (SET_ERROR(EINVAL));
if (version >= ZPL_VERSION_SA)
*featureflags |= DMU_BACKUP_FEATURE_SA_SPILL;
}
#endif
/* raw sends imply large_block_ok */
if ((dspp->rawok || dspp->large_block_ok) &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_BLOCKS)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_BLOCKS;
}
/* encrypted datasets will not have embedded blocks */
if ((dspp->embedok || dspp->rawok) && !os->os_encrypted &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMBEDDED_DATA)) {
*featureflags |= DMU_BACKUP_FEATURE_EMBED_DATA;
}
/* raw send implies compressok */
if (dspp->compressok || dspp->rawok)
*featureflags |= DMU_BACKUP_FEATURE_COMPRESSED;
if (dspp->rawok && os->os_encrypted)
*featureflags |= DMU_BACKUP_FEATURE_RAW;
if ((*featureflags &
(DMU_BACKUP_FEATURE_EMBED_DATA | DMU_BACKUP_FEATURE_COMPRESSED |
DMU_BACKUP_FEATURE_RAW)) != 0 &&
spa_feature_is_active(dp->dp_spa, SPA_FEATURE_LZ4_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_LZ4;
}
/*
* We specifically do not include DMU_BACKUP_FEATURE_EMBED_DATA here to
* allow sending ZSTD compressed datasets to a receiver that does not
* support ZSTD
*/
if ((*featureflags &
(DMU_BACKUP_FEATURE_COMPRESSED | DMU_BACKUP_FEATURE_RAW)) != 0 &&
dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_ZSTD_COMPRESS)) {
*featureflags |= DMU_BACKUP_FEATURE_ZSTD;
}
if (dspp->resumeobj != 0 || dspp->resumeoff != 0) {
*featureflags |= DMU_BACKUP_FEATURE_RESUMING;
}
if (dspp->redactbook != NULL) {
*featureflags |= DMU_BACKUP_FEATURE_REDACTED;
}
if (dsl_dataset_feature_is_active(to_ds, SPA_FEATURE_LARGE_DNODE)) {
*featureflags |= DMU_BACKUP_FEATURE_LARGE_DNODE;
}
return (0);
}
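/*
* Allocate and populate the DRR_BEGIN record that starts the send stream.
*/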
static dmu_replay_record_t *
create_begin_record(struct dmu_send_params *dspp, objset_t *os,
uint64_t featureflags)
{
dmu_replay_record_t *drr = kmem_zalloc(sizeof (dmu_replay_record_t),
KM_SLEEP);
drr->drr_type = DRR_BEGIN;
struct drr_begin *drrb = &drr->drr_u.drr_begin;
dsl_dataset_t *to_ds = dspp->to_ds;
drrb->drr_magic = DMU_BACKUP_MAGIC;
drrb->drr_creation_time = dsl_dataset_phys(to_ds)->ds_creation_time;
drrb->drr_type = dmu_objset_type(os);
drrb->drr_toguid = dsl_dataset_phys(to_ds)->ds_guid;
drrb->drr_fromguid = dspp->ancestor_zb.zbm_guid;
DMU_SET_STREAM_HDRTYPE(drrb->drr_versioninfo, DMU_SUBSTREAM);
DMU_SET_FEATUREFLAGS(drrb->drr_versioninfo, featureflags);
if (dspp->is_clone)
drrb->drr_flags |= DRR_FLAG_CLONE;
if (dsl_dataset_phys(dspp->to_ds)->ds_flags & DS_FLAG_CI_DATASET)
drrb->drr_flags |= DRR_FLAG_CI_DATA;
if (zfs_send_set_freerecords_bit)
drrb->drr_flags |= DRR_FLAG_FREERECORDS;
drr->drr_u.drr_begin.drr_flags |= DRR_FLAG_SPILL_BLOCK;
if (dspp->savedok) {
drrb->drr_toguid = dspp->saved_guid;
strlcpy(drrb->drr_toname, dspp->saved_toname,
sizeof (drrb->drr_toname));
} else {
dsl_dataset_name(to_ds, drrb->drr_toname);
if (!to_ds->ds_is_snapshot) {
(void) strlcat(drrb->drr_toname, "@--head--",
sizeof (drrb->drr_toname));
}
}
return (drr);
}
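/*
* Start the to_ds traversal thread, which walks the dataset being sent and
* enqueues candidate ranges for the merge thread.
*/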
static void
setup_to_thread(struct send_thread_arg *to_arg, objset_t *to_os,
dmu_sendstatus_t *dssp, uint64_t fromtxg, boolean_t rawok)
{
VERIFY0(bqueue_init(&to_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
to_arg->error_code = 0;
to_arg->cancel = B_FALSE;
to_arg->os = to_os;
to_arg->fromtxg = fromtxg;
to_arg->flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA;
if (rawok)
to_arg->flags |= TRAVERSE_NO_DECRYPT;
+ if (zfs_send_corrupt_data)
+ to_arg->flags |= TRAVERSE_HARD;
to_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, send_traverse_thread, to_arg, 0,
curproc, TS_RUN, minclsyspri);
}
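/*
* Start the thread that walks the redaction list of the bookmark we are
* sending from (if any) and enqueues its ranges.
*/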
static void
setup_from_thread(struct redact_list_thread_arg *from_arg,
redaction_list_t *from_rl, dmu_sendstatus_t *dssp)
{
VERIFY0(bqueue_init(&from_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
from_arg->error_code = 0;
from_arg->cancel = B_FALSE;
from_arg->rl = from_rl;
from_arg->mark_redact = B_FALSE;
from_arg->num_blocks_visited = &dssp->dss_blocks;
/*
* If from_rl is null, redact_list_thread just enqueues an eos marker and
* returns success.
*/
(void) thread_create(NULL, 0, redact_list_thread, from_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_redact_list_thread(struct redact_list_thread_arg *rlt_arg,
struct dmu_send_params *dspp, redaction_list_t *rl, dmu_sendstatus_t *dssp)
{
if (dspp->redactbook == NULL)
return;
rlt_arg->cancel = B_FALSE;
VERIFY0(bqueue_init(&rlt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
rlt_arg->error_code = 0;
rlt_arg->mark_redact = B_TRUE;
rlt_arg->rl = rl;
rlt_arg->num_blocks_visited = &dssp->dss_blocks;
(void) thread_create(NULL, 0, redact_list_thread, rlt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
static void
setup_merge_thread(struct send_merge_thread_arg *smt_arg,
struct dmu_send_params *dspp, struct redact_list_thread_arg *from_arg,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *rlt_arg,
objset_t *os)
{
VERIFY0(bqueue_init(&smt_arg->q, zfs_send_no_prefetch_queue_ff,
MAX(zfs_send_no_prefetch_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
smt_arg->cancel = B_FALSE;
smt_arg->error = 0;
smt_arg->from_arg = from_arg;
smt_arg->to_arg = to_arg;
if (dspp->redactbook != NULL)
smt_arg->redact_arg = rlt_arg;
smt_arg->os = os;
(void) thread_create(NULL, 0, send_merge_thread, smt_arg, 0, curproc,
TS_RUN, minclsyspri);
}
static void
setup_reader_thread(struct send_reader_thread_arg *srt_arg,
struct dmu_send_params *dspp, struct send_merge_thread_arg *smt_arg,
uint64_t featureflags)
{
VERIFY0(bqueue_init(&srt_arg->q, zfs_send_queue_ff,
MAX(zfs_send_queue_length, 2 * zfs_max_recordsize),
offsetof(struct send_range, ln)));
srt_arg->smta = smt_arg;
srt_arg->issue_reads = !dspp->dso->dso_dryrun;
srt_arg->featureflags = featureflags;
(void) thread_create(NULL, 0, send_reader_thread, srt_arg, 0,
curproc, TS_RUN, minclsyspri);
}
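/*
* Convert the resume object/offset into traversal bookmarks for the worker
* threads and record the resume point in the BEGIN payload nvlist.
*/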
static int
setup_resume_points(struct dmu_send_params *dspp,
struct send_thread_arg *to_arg, struct redact_list_thread_arg *from_arg,
struct redact_list_thread_arg *rlt_arg,
struct send_merge_thread_arg *smt_arg, boolean_t resuming, objset_t *os,
redaction_list_t *redact_rl, nvlist_t *nvl)
{
dsl_dataset_t *to_ds = dspp->to_ds;
int err = 0;
uint64_t obj = 0;
uint64_t blkid = 0;
if (resuming) {
obj = dspp->resumeobj;
dmu_object_info_t to_doi;
err = dmu_object_info(os, obj, &to_doi);
if (err != 0)
return (err);
blkid = dspp->resumeoff / to_doi.doi_data_block_size;
}
/*
* If we're resuming a redacted send, we can skip to the appropriate
* point in the redaction bookmark by binary searching through it.
*/
if (redact_rl != NULL) {
SET_BOOKMARK(&rlt_arg->resume, to_ds->ds_object, obj, 0, blkid);
}
SET_BOOKMARK(&to_arg->resume, to_ds->ds_object, obj, 0, blkid);
if (nvlist_exists(nvl, BEGINNV_REDACT_FROM_SNAPS)) {
uint64_t objset = dspp->ancestor_zb.zbm_redaction_obj;
/*
* Note: If the resume point is in an object whose
* blocksize is different in the from vs to snapshots,
* we will have divided by the "wrong" blocksize.
* However, in this case fromsnap's send_cb() will
* detect that the blocksize has changed and therefore
* ignore this object.
*
* If we're resuming a send from a redaction bookmark,
* we still cannot accidentally suggest blocks behind
* the to_ds. In addition, we know that any blocks in
* the object in the to_ds will have to be sent, since
* the size changed. Therefore, we can't cause any harm
* this way either.
*/
SET_BOOKMARK(&from_arg->resume, objset, obj, 0, blkid);
}
if (resuming) {
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OBJECT, dspp->resumeobj);
fnvlist_add_uint64(nvl, BEGINNV_RESUME_OFFSET, dspp->resumeoff);
}
return (0);
}
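/*
* Allocate a progress-tracking structure and link it onto the dataset's list
* of active send streams.
*/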
static dmu_sendstatus_t *
setup_send_progress(struct dmu_send_params *dspp)
{
dmu_sendstatus_t *dssp = kmem_zalloc(sizeof (*dssp), KM_SLEEP);
dssp->dss_outfd = dspp->outfd;
dssp->dss_off = dspp->off;
dssp->dss_proc = curproc;
mutex_enter(&dspp->to_ds->ds_sendstream_lock);
list_insert_head(&dspp->to_ds->ds_sendstreams, dssp);
mutex_exit(&dspp->to_ds->ds_sendstream_lock);
return (dssp);
}
/*
* Actually do the bulk of the work in a zfs send.
*
* The idea is that we want to do a send from ancestor_zb to to_ds. We also
* want to not send any data that has been modified by all the datasets in
* redactsnaparr, and store the list of blocks that are redacted in this way in
* a bookmark named redactbook, created on the to_ds. We do this by creating
* several worker threads, whose function is described below.
*
* There are three cases.
* The first case is a redacted zfs send. In this case there are 5 threads.
* The first thread is the to_ds traversal thread: it calls dataset_traverse on
* the to_ds and finds all the blocks that have changed since ancestor_zb (if
* it's a full send, that's all blocks in the dataset). It then sends those
* blocks on to the send merge thread. The redact list thread takes the data
* from the redaction bookmark and sends those blocks on to the send merge
* thread. The send merge thread takes the data from the to_ds traversal
* thread, and combines it with the redaction records from the redact list
* thread. If a block appears in both the to_ds's data and the redaction data,
* the send merge thread will mark it as redacted and send it on to the prefetch
* thread. Otherwise, the send merge thread will send the block on to the
* prefetch thread unchanged. The prefetch thread will issue prefetch reads for
* any data that isn't redacted, and then send the data on to the main thread.
* The main thread behaves the same as in a normal send case, issuing demand
* reads for data blocks and sending out records over the network
*
* The graphic below diagrams the flow of data in the case of a redacted zfs
* send. Each box represents a thread, and each line represents the flow of
* data.
*
* Records from the |
* redaction bookmark |
* +--------------------+ | +---------------------------+
* | | v | Send Merge Thread |
* | Redact List Thread +----------> Apply redaction marks to |
* | | | records as specified by |
* +--------------------+ | redaction ranges |
* +----^---------------+------+
* | | Merged data
* | |
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since |
* ancestor_zb +------------v----+
* | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The second case is an incremental send from a redaction bookmark. The to_ds
* traversal thread and the main thread behave the same as in the redacted
* send case. The new thread is the from bookmark traversal thread. It
* iterates over the redaction list in the redaction bookmark, and enqueues
* records for each block that was redacted in the original send. The send
* merge thread now has to merge the data from the two threads. For details
* about that process, see the header comment of send_merge_thread(). Any data
* it decides to send on will be prefetched by the prefetch thread. Note that
* you can perform a redacted send from a redaction bookmark; in that case,
* the data flow behaves very similarly to the flow in the redacted send case,
* except with the addition of the bookmark traversal thread iterating over the
* redaction bookmark. The send_merge_thread also has to take on the
* responsibility of merging the redact list thread's records, the bookmark
* traversal thread's records, and the to_ds records.
*
* +---------------------+
* | |
* | Redact List Thread +--------------+
* | | |
* +---------------------+ |
* Blocks in redaction list | Ranges modified by every secure snap
* of from bookmark | (or EOS if not redacted)
* |
* +---------------------+ | +----v----------------------+
* | bookmark Traversal | v | Send Merge Thread |
* | Thread (finds +---------> Merges bookmark, rlt, and |
* | candidate blocks) | | to_ds send records |
* +---------------------+ +----^---------------+------+
* | | Merged data
* | +------------v--------+
* | | Prefetch Thread |
* +--------------------+ | | Issues prefetch |
* | to_ds Traversal | | | reads of data blocks|
* | Thread (finds +---------------+ +------------+--------+
* | candidate blocks) | Blocks modified | Prefetched data
* +--------------------+ by to_ds since +------------v----+
* ancestor_zb | Main Thread | File Descriptor
* | Sends data over +->(to zfs receive)
* | wire |
* +-----------------+
*
* The final case is a simple zfs full or incremental send. The to_ds traversal
* thread behaves the same as always. The redact list thread is never started.
* The send merge thread takes all the blocks that the to_ds traversal thread
* sends it, prefetches the data, and sends the blocks on to the main thread.
* The main thread sends the data over the wire.
*
* To keep performance acceptable, we want to prefetch the data in the worker
* threads. While the to_ds thread could simply use the TRAVERSE_PREFETCH
* feature built into traverse_dataset, the combining and deletion of records
* due to redaction and sends from redaction bookmarks mean that we could
* issue many unnecessary prefetches. As a result, we only prefetch data
* after we've determined that the record is not going to be redacted. To
* prevent the prefetching from getting too far ahead of the main thread, the
* blocking queues that are used for communication are capped not by the
* number of entries in the queue, but by the sum of the size of the
* prefetches associated with them. The limit on the amount of data that the
* thread can prefetch beyond what the main thread has reached is controlled
* by the global variable zfs_send_queue_length. In addition, to prevent poor
* performance in the beginning of a send, we also limit the distance ahead
* that the traversal threads can be. That distance is controlled by the
* zfs_send_no_prefetch_queue_length tunable.
*
* Note: Releases dp using the specified tag.
*/
static int
dmu_send_impl(struct dmu_send_params *dspp)
{
objset_t *os;
dmu_replay_record_t *drr;
dmu_sendstatus_t *dssp;
dmu_send_cookie_t dsc = {0};
int err;
uint64_t fromtxg = dspp->ancestor_zb.zbm_creation_txg;
uint64_t featureflags = 0;
struct redact_list_thread_arg *from_arg;
struct send_thread_arg *to_arg;
struct redact_list_thread_arg *rlt_arg;
struct send_merge_thread_arg *smt_arg;
struct send_reader_thread_arg *srt_arg;
struct send_range *range;
redaction_list_t *from_rl = NULL;
redaction_list_t *redact_rl = NULL;
boolean_t resuming = (dspp->resumeobj != 0 || dspp->resumeoff != 0);
boolean_t book_resuming = resuming;
dsl_dataset_t *to_ds = dspp->to_ds;
zfs_bookmark_phys_t *ancestor_zb = &dspp->ancestor_zb;
dsl_pool_t *dp = dspp->dp;
void *tag = dspp->tag;
err = dmu_objset_from_ds(to_ds, &os);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If this is a non-raw send of an encrypted ds, we can ensure that
* the objset_phys_t is authenticated. This is safe because this is
* either a snapshot or we have owned the dataset, ensuring that
* it can't be modified.
*/
if (!dspp->rawok && os->os_encrypted &&
arc_is_unauthenticated(os->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, to_ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform(os->os_phys_buf, os->os_spa,
&zb, B_FALSE);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
ASSERT0(arc_is_unauthenticated(os->os_phys_buf));
}
if ((err = setup_featureflags(dspp, os, &featureflags)) != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
/*
* If we're doing a redacted send, hold the bookmark's redaction list.
*/
if (dspp->redactbook != NULL) {
err = dsl_redaction_list_hold_obj(dp,
dspp->redactbook->zbm_redaction_obj, FTAG,
&redact_rl);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, redact_rl, FTAG);
}
/*
* If we're sending from a redaction bookmark, hold the redaction list
* so that we can consider sending the redacted blocks.
*/
if (ancestor_zb->zbm_redaction_obj != 0) {
err = dsl_redaction_list_hold_obj(dp,
ancestor_zb->zbm_redaction_obj, FTAG, &from_rl);
if (err != 0) {
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
dsl_pool_rele(dp, tag);
return (SET_ERROR(EINVAL));
}
dsl_redaction_list_long_hold(dp, from_rl, FTAG);
}
dsl_dataset_long_hold(to_ds, FTAG);
from_arg = kmem_zalloc(sizeof (*from_arg), KM_SLEEP);
to_arg = kmem_zalloc(sizeof (*to_arg), KM_SLEEP);
rlt_arg = kmem_zalloc(sizeof (*rlt_arg), KM_SLEEP);
smt_arg = kmem_zalloc(sizeof (*smt_arg), KM_SLEEP);
srt_arg = kmem_zalloc(sizeof (*srt_arg), KM_SLEEP);
drr = create_begin_record(dspp, os, featureflags);
dssp = setup_send_progress(dspp);
dsc.dsc_drr = drr;
dsc.dsc_dso = dspp->dso;
dsc.dsc_os = os;
dsc.dsc_off = dspp->off;
dsc.dsc_toguid = dsl_dataset_phys(to_ds)->ds_guid;
dsc.dsc_fromtxg = fromtxg;
dsc.dsc_pending_op = PENDING_NONE;
dsc.dsc_featureflags = featureflags;
dsc.dsc_resume_object = dspp->resumeobj;
dsc.dsc_resume_offset = dspp->resumeoff;
dsl_pool_rele(dp, tag);
void *payload = NULL;
size_t payload_len = 0;
nvlist_t *nvl = fnvlist_alloc();
/*
* If we're doing a redacted send, we include the snapshots we're
* redacted with respect to so that the target system knows what send
* streams can be correctly received on top of this dataset. If we're
* instead sending a redacted dataset, we include the snapshots that the
* dataset was created with respect to.
*/
if (dspp->redactbook != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS,
redact_rl->rl_phys->rlp_snaps,
redact_rl->rl_phys->rlp_num_snaps);
} else if (dsl_dataset_feature_is_active(to_ds,
SPA_FEATURE_REDACTED_DATASETS)) {
uint64_t *tods_guids;
uint64_t length;
VERIFY(dsl_dataset_get_uint64_array_feature(to_ds,
SPA_FEATURE_REDACTED_DATASETS, &length, &tods_guids));
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_SNAPS, tods_guids,
length);
}
/*
* If we're sending from a redaction bookmark, then we should retrieve
* the guids of that bookmark so we can send them over the wire.
*/
if (from_rl != NULL) {
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
from_rl->rl_phys->rlp_snaps,
from_rl->rl_phys->rlp_num_snaps);
}
/*
* If the snapshot we're sending from is redacted, include the redaction
* list in the stream.
*/
if (dspp->numfromredactsnaps != NUM_SNAPS_NOT_REDACTED) {
ASSERT3P(from_rl, ==, NULL);
fnvlist_add_uint64_array(nvl, BEGINNV_REDACT_FROM_SNAPS,
dspp->fromredactsnaps, (uint_t)dspp->numfromredactsnaps);
if (dspp->numfromredactsnaps > 0) {
kmem_free(dspp->fromredactsnaps,
dspp->numfromredactsnaps * sizeof (uint64_t));
dspp->fromredactsnaps = NULL;
}
}
if (resuming || book_resuming) {
err = setup_resume_points(dspp, to_arg, from_arg,
rlt_arg, smt_arg, resuming, os, redact_rl, nvl);
if (err != 0)
goto out;
}
if (featureflags & DMU_BACKUP_FEATURE_RAW) {
uint64_t ivset_guid = (ancestor_zb != NULL) ?
ancestor_zb->zbm_ivset_guid : 0;
nvlist_t *keynvl = NULL;
ASSERT(os->os_encrypted);
err = dsl_crypto_populate_key_nvlist(os, ivset_guid,
&keynvl);
if (err != 0) {
fnvlist_free(nvl);
goto out;
}
fnvlist_add_nvlist(nvl, "crypt_keydata", keynvl);
fnvlist_free(keynvl);
}
if (!nvlist_empty(nvl)) {
payload = fnvlist_pack(nvl, &payload_len);
drr->drr_payloadlen = payload_len;
}
fnvlist_free(nvl);
err = dump_record(&dsc, payload, payload_len);
fnvlist_pack_free(payload, payload_len);
if (err != 0) {
err = dsc.dsc_err;
goto out;
}
setup_to_thread(to_arg, os, dssp, fromtxg, dspp->rawok);
setup_from_thread(from_arg, from_rl, dssp);
setup_redact_list_thread(rlt_arg, dspp, redact_rl, dssp);
setup_merge_thread(smt_arg, dspp, from_arg, to_arg, rlt_arg, os);
setup_reader_thread(srt_arg, dspp, smt_arg, featureflags);
range = bqueue_dequeue(&srt_arg->q);
while (err == 0 && !range->eos_marker) {
err = do_dump(&dsc, range);
range = get_next_range(&srt_arg->q, range);
if (issig(JUSTLOOKING) && issig(FORREAL))
err = SET_ERROR(EINTR);
}
/*
* If we hit an error or are interrupted, cancel our worker threads and
* clear the queue of any pending records. The threads will pass the
* cancel up the tree of worker threads, and each one will clean up any
* pending records before exiting.
*/
if (err != 0) {
srt_arg->cancel = B_TRUE;
while (!range->eos_marker) {
range = get_next_range(&srt_arg->q, range);
}
}
range_free(range);
bqueue_destroy(&srt_arg->q);
bqueue_destroy(&smt_arg->q);
if (dspp->redactbook != NULL)
bqueue_destroy(&rlt_arg->q);
bqueue_destroy(&to_arg->q);
bqueue_destroy(&from_arg->q);
if (err == 0 && srt_arg->error != 0)
err = srt_arg->error;
if (err != 0)
goto out;
if (dsc.dsc_pending_op != PENDING_NONE)
if (dump_record(&dsc, NULL, 0) != 0)
err = SET_ERROR(EINTR);
if (err != 0) {
if (err == EINTR && dsc.dsc_err != 0)
err = dsc.dsc_err;
goto out;
}
/*
* Send the DRR_END record if this is not a saved stream.
* Otherwise, the omitted DRR_END record will signal to
* the receive side that the stream is incomplete.
*/
if (!dspp->savedok) {
bzero(drr, sizeof (dmu_replay_record_t));
drr->drr_type = DRR_END;
drr->drr_u.drr_end.drr_checksum = dsc.dsc_zc;
drr->drr_u.drr_end.drr_toguid = dsc.dsc_toguid;
if (dump_record(&dsc, NULL, 0) != 0)
err = dsc.dsc_err;
}
out:
mutex_enter(&to_ds->ds_sendstream_lock);
list_remove(&to_ds->ds_sendstreams, dssp);
mutex_exit(&to_ds->ds_sendstream_lock);
VERIFY(err != 0 || (dsc.dsc_sent_begin &&
(dsc.dsc_sent_end || dspp->savedok)));
kmem_free(drr, sizeof (dmu_replay_record_t));
kmem_free(dssp, sizeof (dmu_sendstatus_t));
kmem_free(from_arg, sizeof (*from_arg));
kmem_free(to_arg, sizeof (*to_arg));
kmem_free(rlt_arg, sizeof (*rlt_arg));
kmem_free(smt_arg, sizeof (*smt_arg));
kmem_free(srt_arg, sizeof (*srt_arg));
dsl_dataset_long_rele(to_ds, FTAG);
if (from_rl != NULL) {
dsl_redaction_list_long_rele(from_rl, FTAG);
dsl_redaction_list_rele(from_rl, FTAG);
}
if (redact_rl != NULL) {
dsl_redaction_list_long_rele(redact_rl, FTAG);
dsl_redaction_list_rele(redact_rl, FTAG);
}
return (err);
}
int
dmu_send_obj(const char *pool, uint64_t tosnap, uint64_t fromsnap,
boolean_t embedok, boolean_t large_block_ok, boolean_t compressok,
boolean_t rawok, boolean_t savedok, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err;
dsl_dataset_t *fromds;
ds_hold_flags_t dsflags;
struct dmu_send_params dspp = {0};
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.rawok = rawok;
dspp.savedok = savedok;
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
err = dsl_pool_hold(pool, FTAG, &dspp.dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_obj_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (fromsnap != 0) {
err = dsl_dataset_hold_obj_flags(dspp.dp, fromsnap, dsflags,
FTAG, &fromds);
if (err != 0) {
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
dspp.ancestor_zb.zbm_guid = dsl_dataset_phys(fromds)->ds_guid;
dspp.ancestor_zb.zbm_creation_txg =
dsl_dataset_phys(fromds)->ds_creation_txg;
dspp.ancestor_zb.zbm_creation_time =
dsl_dataset_phys(fromds)->ds_creation_time;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(dspp.dp->dp_meta_objset,
fromds->ds_object, DS_FIELD_IVSET_GUID, 8, 1,
&dspp.ancestor_zb.zbm_ivset_guid);
}
/* See dmu_send for the reasons behind this. */
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(fromds,
SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size = dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size, KM_SLEEP);
bcopy(fromredact, dspp.fromredactsnaps, size);
}
boolean_t is_before =
dsl_dataset_is_before(dspp.to_ds, fromds, 0);
dspp.is_clone = (dspp.to_ds->ds_dir !=
fromds->ds_dir);
dsl_dataset_rele(fromds, FTAG);
if (!is_before) {
dsl_pool_rele(dspp.dp, FTAG);
err = SET_ERROR(EXDEV);
} else {
err = dmu_send_impl(&dspp);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
dsl_dataset_rele(dspp.to_ds, FTAG);
return (err);
}
int
dmu_send(const char *tosnap, const char *fromsnap, boolean_t embedok,
boolean_t large_block_ok, boolean_t compressok, boolean_t rawok,
boolean_t savedok, uint64_t resumeobj, uint64_t resumeoff,
const char *redactbook, int outfd, offset_t *off,
dmu_send_outparams_t *dsop)
{
int err = 0;
ds_hold_flags_t dsflags;
boolean_t owned = B_FALSE;
dsl_dataset_t *fromds = NULL;
zfs_bookmark_phys_t book = {0};
struct dmu_send_params dspp = {0};
dsflags = (rawok) ? DS_HOLD_FLAG_NONE : DS_HOLD_FLAG_DECRYPT;
dspp.tosnap = tosnap;
dspp.embedok = embedok;
dspp.large_block_ok = large_block_ok;
dspp.compressok = compressok;
dspp.outfd = outfd;
dspp.off = off;
dspp.dso = dsop;
dspp.tag = FTAG;
dspp.resumeobj = resumeobj;
dspp.resumeoff = resumeoff;
dspp.rawok = rawok;
dspp.savedok = savedok;
if (fromsnap != NULL && strpbrk(fromsnap, "@#") == NULL)
return (SET_ERROR(EINVAL));
err = dsl_pool_hold(tosnap, FTAG, &dspp.dp);
if (err != 0)
return (err);
if (strchr(tosnap, '@') == NULL && spa_writeable(dspp.dp->dp_spa)) {
/*
* We are sending a filesystem or volume. Ensure
* that it doesn't change by owning the dataset.
*/
if (savedok) {
/*
* We are looking for the dataset that represents the
* partially received send stream. If this stream was
* received as a new snapshot of an existing dataset,
* this will be saved in a hidden clone named
* "<pool>/<dataset>/%recv". Otherwise, the stream
* will be saved in the live dataset itself. In
* either case we need to use dsl_dataset_own_force()
* because the stream is marked as inconsistent,
* which would normally make it unavailable to be
* owned.
*/
char *name = kmem_asprintf("%s/%s", tosnap,
recv_clone_name);
err = dsl_dataset_own_force(dspp.dp, name, dsflags,
FTAG, &dspp.to_ds);
if (err == ENOENT) {
err = dsl_dataset_own_force(dspp.dp, tosnap,
dsflags, FTAG, &dspp.to_ds);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1,
&dspp.saved_guid);
}
if (err == 0) {
err = zap_lookup(dspp.dp->dp_meta_objset,
dspp.to_ds->ds_object,
DS_FIELD_RESUME_TONAME, 1,
sizeof (dspp.saved_toname),
dspp.saved_toname);
}
if (err != 0)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
kmem_strfree(name);
} else {
err = dsl_dataset_own(dspp.dp, tosnap, dsflags,
FTAG, &dspp.to_ds);
}
owned = B_TRUE;
} else {
err = dsl_dataset_hold_flags(dspp.dp, tosnap, dsflags, FTAG,
&dspp.to_ds);
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
return (err);
}
if (redactbook != NULL) {
char path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(path, tosnap, sizeof (path));
char *at = strchr(path, '@');
if (at == NULL) {
err = EINVAL;
} else {
(void) snprintf(at, sizeof (path) - (at - path), "#%s",
redactbook);
err = dsl_bookmark_lookup(dspp.dp, path,
NULL, &book);
dspp.redactbook = &book;
}
}
if (err != 0) {
dsl_pool_rele(dspp.dp, FTAG);
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
if (fromsnap != NULL) {
zfs_bookmark_phys_t *zb = &dspp.ancestor_zb;
int fsnamelen;
if (strpbrk(tosnap, "@#") != NULL)
fsnamelen = strpbrk(tosnap, "@#") - tosnap;
else
fsnamelen = strlen(tosnap);
/*
* If the fromsnap is in a different filesystem, then
* mark the send stream as a clone.
*/
if (strncmp(tosnap, fromsnap, fsnamelen) != 0 ||
(fromsnap[fsnamelen] != '@' &&
fromsnap[fsnamelen] != '#')) {
dspp.is_clone = B_TRUE;
}
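/*
 * For illustration (hypothetical names): with tosnap "pool/fs@today"
 * and fromsnap "pool/fs@yesterday" the filesystem prefixes match, so
 * this is an ordinary incremental. With fromsnap "pool/origin@base"
 * the prefixes differ and the stream is marked as a clone send. A
 * fromsnap such as "pool/fs#mark" contains '#' rather than '@' and is
 * handled by the bookmark branch below.
 */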
if (strchr(fromsnap, '@') != NULL) {
err = dsl_dataset_hold(dspp.dp, fromsnap, FTAG,
&fromds);
if (err != 0) {
ASSERT3P(fromds, ==, NULL);
} else {
/*
* We need to make a deep copy of the redact
* snapshots of the from snapshot, because the
* array will be freed when we evict from_ds.
*/
uint64_t *fromredact;
if (!dsl_dataset_get_uint64_array_feature(
fromds, SPA_FEATURE_REDACTED_DATASETS,
&dspp.numfromredactsnaps,
&fromredact)) {
dspp.numfromredactsnaps =
NUM_SNAPS_NOT_REDACTED;
} else if (dspp.numfromredactsnaps > 0) {
uint64_t size =
dspp.numfromredactsnaps *
sizeof (uint64_t);
dspp.fromredactsnaps = kmem_zalloc(size,
KM_SLEEP);
bcopy(fromredact, dspp.fromredactsnaps,
size);
}
if (!dsl_dataset_is_before(dspp.to_ds, fromds,
0)) {
err = SET_ERROR(EXDEV);
} else {
zb->zbm_creation_txg =
dsl_dataset_phys(fromds)->
ds_creation_txg;
zb->zbm_creation_time =
dsl_dataset_phys(fromds)->
ds_creation_time;
zb->zbm_guid =
dsl_dataset_phys(fromds)->ds_guid;
zb->zbm_redaction_obj = 0;
if (dsl_dataset_is_zapified(fromds)) {
(void) zap_lookup(
dspp.dp->dp_meta_objset,
fromds->ds_object,
DS_FIELD_IVSET_GUID, 8, 1,
&zb->zbm_ivset_guid);
}
}
dsl_dataset_rele(fromds, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dsl_bookmark_lookup(dspp.dp, fromsnap, dspp.to_ds,
zb);
if (err == EXDEV && zb->zbm_redaction_obj != 0 &&
zb->zbm_guid ==
dsl_dataset_phys(dspp.to_ds)->ds_guid)
err = 0;
}
if (err == 0) {
/* dmu_send_impl will call dsl_pool_rele for us. */
err = dmu_send_impl(&dspp);
} else {
dsl_pool_rele(dspp.dp, FTAG);
}
} else {
dspp.numfromredactsnaps = NUM_SNAPS_NOT_REDACTED;
err = dmu_send_impl(&dspp);
}
if (owned)
dsl_dataset_disown(dspp.to_ds, dsflags, FTAG);
else
dsl_dataset_rele_flags(dspp.to_ds, dsflags, FTAG);
return (err);
}
static int
dmu_adjust_send_estimate_for_indirects(dsl_dataset_t *ds, uint64_t uncompressed,
uint64_t compressed, boolean_t stream_compressed, uint64_t *sizep)
{
int err = 0;
uint64_t size;
/*
* Assume that space (both on-disk and in-stream) is dominated by
* data. We will adjust for indirect blocks and the copies property,
* but ignore per-object space used (e.g., dnodes and DRR_OBJECT records).
*/
uint64_t recordsize;
uint64_t record_count;
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
/* Assume all (uncompressed) blocks are recordsize. */
if (zfs_override_estimate_recordsize != 0) {
recordsize = zfs_override_estimate_recordsize;
} else if (os->os_phys->os_type == DMU_OST_ZVOL) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &recordsize);
} else {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE), &recordsize);
}
if (err != 0)
return (err);
record_count = uncompressed / recordsize;
/*
* If we're estimating a send size for a compressed stream, use the
* compressed data size to estimate the stream size. Otherwise, use the
* uncompressed data size.
*/
size = stream_compressed ? compressed : uncompressed;
/*
* Subtract out approximate space used by indirect blocks.
* Assume most space is used by data blocks (non-indirect, non-dnode).
* Assume no ditto blocks or internal fragmentation.
*
* Therefore, space used by indirect blocks is sizeof(blkptr_t) per
* block.
*/
size -= record_count * sizeof (blkptr_t);
/* Add in the space for the record associated with each block. */
size += record_count * sizeof (dmu_replay_record_t);
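/*
 * Rough worked example (hypothetical numbers): for 1 GiB of
 * uncompressed data at a 128 KiB recordsize, record_count is 8192, so
 * we subtract 8192 * sizeof (blkptr_t) of indirect-block overhead and
 * add 8192 * sizeof (dmu_replay_record_t) of per-record stream
 * headers; both corrections are on the order of a few MiB, small
 * relative to the data itself.
 */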
*sizep = size;
return (0);
}
int
dmu_send_estimate_fast(dsl_dataset_t *origds, dsl_dataset_t *fromds,
zfs_bookmark_phys_t *frombook, boolean_t stream_compressed,
boolean_t saved, uint64_t *sizep)
{
int err;
dsl_dataset_t *ds = origds;
uint64_t uncomp, comp;
ASSERT(dsl_pool_config_held(origds->ds_dir->dd_pool));
ASSERT(fromds == NULL || frombook == NULL);
/*
* If this is a saved send we may actually be sending
* from the %recv clone used for resuming.
*/
if (saved) {
objset_t *mos = origds->ds_dir->dd_pool->dp_meta_objset;
uint64_t guid;
char dsname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_name(origds, dsname);
(void) strcat(dsname, "/");
(void) strcat(dsname, recv_clone_name);
err = dsl_dataset_hold(origds->ds_dir->dd_pool,
dsname, FTAG, &ds);
if (err != ENOENT && err != 0) {
return (err);
} else if (err == ENOENT) {
ds = origds;
}
/* check that this dataset has partially received data */
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TOGUID, 8, 1, &guid);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
err = zap_lookup(mos, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (dsname), dsname);
if (err != 0) {
err = SET_ERROR(err == ENOENT ? EINVAL : err);
goto out;
}
}
/* tosnap must be a snapshot or the target of a saved send */
if (!ds->ds_is_snapshot && ds == origds)
return (SET_ERROR(EINVAL));
if (fromds != NULL) {
uint64_t used;
if (!fromds->ds_is_snapshot) {
err = SET_ERROR(EINVAL);
goto out;
}
if (!dsl_dataset_is_before(ds, fromds, 0)) {
err = SET_ERROR(EXDEV);
goto out;
}
err = dsl_dataset_space_written(fromds, ds, &used, &comp,
&uncomp);
if (err != 0)
goto out;
} else if (frombook != NULL) {
uint64_t used;
err = dsl_dataset_space_written_bookmark(frombook, ds, &used,
&comp, &uncomp);
if (err != 0)
goto out;
} else {
uncomp = dsl_dataset_phys(ds)->ds_uncompressed_bytes;
comp = dsl_dataset_phys(ds)->ds_compressed_bytes;
}
err = dmu_adjust_send_estimate_for_indirects(ds, uncomp, comp,
stream_compressed, sizep);
/*
* Add the size of the BEGIN and END records to the estimate.
*/
*sizep += 2 * sizeof (dmu_replay_record_t);
out:
if (ds != origds)
dsl_dataset_rele(ds, FTAG);
return (err);
}
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_send, zfs_send_, corrupt_data, INT, ZMOD_RW,
"Allow sending corrupt data");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_length, INT, ZMOD_RW,
"Maximum send queue length");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, unmodified_spill_blocks, INT, ZMOD_RW,
"Send unmodified spill blocks");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_length, INT, ZMOD_RW,
"Maximum send queue length for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, queue_ff, INT, ZMOD_RW,
"Send queue fill fraction");
ZFS_MODULE_PARAM(zfs_send, zfs_send_, no_prefetch_queue_ff, INT, ZMOD_RW,
"Send queue fill fraction for non-prefetch queues");
ZFS_MODULE_PARAM(zfs_send, zfs_, override_estimate_recordsize, INT, ZMOD_RW,
"Override block size estimate with fixed size");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/dsl_prop.c b/sys/contrib/openzfs/module/zfs/dsl_prop.c
index 0787fcdad9b4..dfa04d7681be 100644
--- a/sys/contrib/openzfs/module/zfs/dsl_prop.c
+++ b/sys/contrib/openzfs/module/zfs/dsl_prop.c
@@ -1,1287 +1,1287 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright 2019 Joyent, Inc.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include "zfs_prop.h"
#define ZPROP_INHERIT_SUFFIX "$inherit"
#define ZPROP_RECVD_SUFFIX "$recvd"
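/*
 * For illustration (using "compression" as an arbitrary example), the
 * props ZAP of a dsl_dir or snapshot may contain:
 *
 *   "compression"          - an explicit local setting
 *   "compression$inherit"  - an explicit-inheritance marker
 *   "compression$recvd"    - a value received via "zfs receive"
 *
 * dsl_prop_get_dd() below checks the local value first, then (unless
 * an $inherit entry tells it to keep inheriting) the $recvd value,
 * then walks up the parent dsl_dirs, and finally falls back to
 * dodefault().
 */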
static int
dodefault(zfs_prop_t prop, int intsz, int numints, void *buf)
{
/*
* The setonce properties are read-only, BUT they still
* have a default value that can be used as the initial
* value.
*/
if (prop == ZPROP_INVAL ||
(zfs_prop_readonly(prop) && !zfs_prop_setonce(prop)))
return (SET_ERROR(ENOENT));
if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
if (intsz != 1)
return (SET_ERROR(EOVERFLOW));
(void) strncpy(buf, zfs_prop_default_string(prop),
numints);
} else {
if (intsz != 8 || numints < 1)
return (SET_ERROR(EOVERFLOW));
*(uint64_t *)buf = zfs_prop_default_numeric(prop);
}
return (0);
}
int
dsl_prop_get_dd(dsl_dir_t *dd, const char *propname,
int intsz, int numints, void *buf, char *setpoint, boolean_t snapshot)
{
int err;
dsl_dir_t *target = dd;
objset_t *mos = dd->dd_pool->dp_meta_objset;
zfs_prop_t prop;
boolean_t inheritable;
boolean_t inheriting = B_FALSE;
char *inheritstr;
char *recvdstr;
ASSERT(dsl_pool_config_held(dd->dd_pool));
if (setpoint)
setpoint[0] = '\0';
prop = zfs_name_to_prop(propname);
inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
inheritstr = kmem_asprintf("%s%s", propname, ZPROP_INHERIT_SUFFIX);
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
/*
* Note: dd may become NULL, therefore we shouldn't dereference it
* after this loop.
*/
for (; dd != NULL; dd = dd->dd_parent) {
if (dd != target || snapshot) {
if (!inheritable) {
err = SET_ERROR(ENOENT);
break;
}
inheriting = B_TRUE;
}
/* Check for a local value. */
err = zap_lookup(mos, dsl_dir_phys(dd)->dd_props_zapobj,
propname, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
dsl_dir_name(dd, setpoint);
break;
}
/*
* Skip the check for a received value if there is an explicit
* inheritance entry.
*/
err = zap_contains(mos, dsl_dir_phys(dd)->dd_props_zapobj,
inheritstr);
if (err != 0 && err != ENOENT)
break;
if (err == ENOENT) {
/* Check for a received value. */
err = zap_lookup(mos, dsl_dir_phys(dd)->dd_props_zapobj,
recvdstr, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0) {
if (inheriting) {
dsl_dir_name(dd, setpoint);
} else {
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
}
}
break;
}
}
/*
* If we found an explicit inheritance entry, err is zero even
* though we haven't yet found the value, so reinitializing err
* at the end of the loop (instead of at the beginning) ensures
* that err has a valid post-loop value.
*/
err = SET_ERROR(ENOENT);
}
if (err == ENOENT)
err = dodefault(prop, intsz, numints, buf);
kmem_strfree(inheritstr);
kmem_strfree(recvdstr);
return (err);
}
int
dsl_prop_get_ds(dsl_dataset_t *ds, const char *propname,
int intsz, int numints, void *buf, char *setpoint)
{
zfs_prop_t prop = zfs_name_to_prop(propname);
boolean_t inheritable;
uint64_t zapobj;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
inheritable = (prop == ZPROP_INVAL || zfs_prop_inheritable(prop));
zapobj = dsl_dataset_phys(ds)->ds_props_obj;
if (zapobj != 0) {
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
int err;
ASSERT(ds->ds_is_snapshot);
/* Check for a local value. */
err = zap_lookup(mos, zapobj, propname, intsz, numints, buf);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
dsl_dataset_name(ds, setpoint);
return (err);
}
/*
* Skip the check for a received value if there is an explicit
* inheritance entry.
*/
if (inheritable) {
char *inheritstr = kmem_asprintf("%s%s", propname,
ZPROP_INHERIT_SUFFIX);
err = zap_contains(mos, zapobj, inheritstr);
kmem_strfree(inheritstr);
if (err != 0 && err != ENOENT)
return (err);
}
if (err == ENOENT) {
/* Check for a received value. */
char *recvdstr = kmem_asprintf("%s%s", propname,
ZPROP_RECVD_SUFFIX);
err = zap_lookup(mos, zapobj, recvdstr,
intsz, numints, buf);
kmem_strfree(recvdstr);
if (err != ENOENT) {
if (setpoint != NULL && err == 0)
(void) strlcpy(setpoint,
ZPROP_SOURCE_VAL_RECVD,
MAXNAMELEN);
return (err);
}
}
}
return (dsl_prop_get_dd(ds->ds_dir, propname,
intsz, numints, buf, setpoint, ds->ds_is_snapshot));
}
static dsl_prop_record_t *
dsl_prop_record_find(dsl_dir_t *dd, const char *propname)
{
dsl_prop_record_t *pr = NULL;
ASSERT(MUTEX_HELD(&dd->dd_lock));
for (pr = list_head(&dd->dd_props);
pr != NULL; pr = list_next(&dd->dd_props, pr)) {
if (strcmp(pr->pr_propname, propname) == 0)
break;
}
return (pr);
}
static dsl_prop_record_t *
dsl_prop_record_create(dsl_dir_t *dd, const char *propname)
{
dsl_prop_record_t *pr;
ASSERT(MUTEX_HELD(&dd->dd_lock));
pr = kmem_alloc(sizeof (dsl_prop_record_t), KM_SLEEP);
pr->pr_propname = spa_strdup(propname);
list_create(&pr->pr_cbs, sizeof (dsl_prop_cb_record_t),
offsetof(dsl_prop_cb_record_t, cbr_pr_node));
list_insert_head(&dd->dd_props, pr);
return (pr);
}
void
dsl_prop_init(dsl_dir_t *dd)
{
list_create(&dd->dd_props, sizeof (dsl_prop_record_t),
offsetof(dsl_prop_record_t, pr_node));
}
void
dsl_prop_fini(dsl_dir_t *dd)
{
dsl_prop_record_t *pr;
while ((pr = list_remove_head(&dd->dd_props)) != NULL) {
list_destroy(&pr->pr_cbs);
spa_strfree((char *)pr->pr_propname);
kmem_free(pr, sizeof (dsl_prop_record_t));
}
list_destroy(&dd->dd_props);
}
/*
* Register interest in the named property. We'll call the callback
* once to notify it of the current property value, and again each time
* the property changes, until this callback is unregistered.
*
* Return 0 on success, errno if the prop is not an integer value.
*/
int
dsl_prop_register(dsl_dataset_t *ds, const char *propname,
dsl_prop_changed_cb_t *callback, void *cbarg)
{
dsl_dir_t *dd = ds->ds_dir;
uint64_t value;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
int err;
dsl_pool_t *dp __maybe_unused = dd->dd_pool;
ASSERT(dsl_pool_config_held(dp));
err = dsl_prop_get_int_ds(ds, propname, &value);
if (err != 0)
return (err);
cbr = kmem_alloc(sizeof (dsl_prop_cb_record_t), KM_SLEEP);
cbr->cbr_ds = ds;
cbr->cbr_func = callback;
cbr->cbr_arg = cbarg;
mutex_enter(&dd->dd_lock);
pr = dsl_prop_record_find(dd, propname);
if (pr == NULL)
pr = dsl_prop_record_create(dd, propname);
cbr->cbr_pr = pr;
list_insert_head(&pr->pr_cbs, cbr);
list_insert_head(&ds->ds_prop_cbs, cbr);
mutex_exit(&dd->dd_lock);
cbr->cbr_func(cbr->cbr_arg, value);
return (0);
}
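/*
 * Usage sketch (hypothetical consumer, for illustration only):
 *
 *     static void
 *     blksz_changed_cb(void *arg, uint64_t newval)
 *     {
 *             my_state_t *ms = arg;
 *             ms->ms_blksz = newval;
 *     }
 *
 *     err = dsl_prop_register(ds,
 *         zfs_prop_to_name(ZFS_PROP_RECORDSIZE), blksz_changed_cb, ms);
 *     ...
 *     dsl_prop_unregister_all(ds, ms);
 *
 * The callback fires once immediately with the current value and again
 * on each subsequent change until it is unregistered.
 */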
int
dsl_prop_get(const char *dsname, const char *propname,
int intsz, int numints, void *buf, char *setpoint)
{
objset_t *os;
int error;
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = dsl_prop_get_ds(dmu_objset_ds(os), propname,
intsz, numints, buf, setpoint);
dmu_objset_rele(os, FTAG);
return (error);
}
/*
* Get the current property value. It may have changed by the time this
* function returns, so it is NOT safe to follow up with
* dsl_prop_register() and assume that the value has not changed in
* between.
*
* Return 0 on success, ENOENT if ddname is invalid.
*/
int
dsl_prop_get_integer(const char *ddname, const char *propname,
uint64_t *valuep, char *setpoint)
{
return (dsl_prop_get(ddname, propname, 8, 1, valuep, setpoint));
}
int
dsl_prop_get_int_ds(dsl_dataset_t *ds, const char *propname,
uint64_t *valuep)
{
return (dsl_prop_get_ds(ds, propname, 8, 1, valuep, NULL));
}
/*
* Predict the effective value of the given special property if it were set with
* the given value and source. This is not a general purpose function. It exists
* only to handle the special requirements of the quota and reservation
* properties. The fact that these properties are non-inheritable greatly
* simplifies the prediction logic.
*
* Returns 0 on success, a positive error code on failure, or -1 if called with
* a property not handled by this function.
*/
int
dsl_prop_predict(dsl_dir_t *dd, const char *propname,
zprop_source_t source, uint64_t value, uint64_t *newvalp)
{
zfs_prop_t prop = zfs_name_to_prop(propname);
objset_t *mos;
uint64_t zapobj;
uint64_t version;
char *recvdstr;
int err = 0;
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_REFRESERVATION:
break;
default:
return (-1);
}
mos = dd->dd_pool->dp_meta_objset;
zapobj = dsl_dir_phys(dd)->dd_props_zapobj;
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
version = spa_version(dd->dd_pool->dp_spa);
if (version < SPA_VERSION_RECVD_PROPS) {
if (source & ZPROP_SRC_NONE)
source = ZPROP_SRC_NONE;
else if (source & ZPROP_SRC_RECEIVED)
source = ZPROP_SRC_LOCAL;
}
switch ((int)source) {
case ZPROP_SRC_NONE:
/* Revert to the received value, if any. */
err = zap_lookup(mos, zapobj, recvdstr, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = 0;
break;
case ZPROP_SRC_LOCAL:
*newvalp = value;
break;
case ZPROP_SRC_RECEIVED:
/*
* If there's no local setting, then the new received value will
* be the effective value.
*/
err = zap_lookup(mos, zapobj, propname, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = value;
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED):
/*
* We're clearing the received value, so the local setting (if
* it exists) remains the effective value.
*/
err = zap_lookup(mos, zapobj, propname, 8, 1, newvalp);
if (err == ENOENT)
*newvalp = 0;
break;
default:
panic("unexpected property source: %d", source);
}
kmem_strfree(recvdstr);
if (err == ENOENT)
return (0);
return (err);
}
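/*
 * Example reading of the cases above (hypothetical values): if "quota"
 * has a received value of 10G and a local value of 20G, predicting
 * with source ZPROP_SRC_NONE yields 10G (revert to the received
 * value), while (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED) yields 20G,
 * since clearing the received value leaves the local setting in
 * effect.
 */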
/*
* Unregister this callback. Return 0 on success, ENOENT if ddname is
* invalid, or ENOMSG if no matching callback registered.
*
* NOTE: This function is no longer used internally but has been preserved
* to prevent breaking external consumers (Lustre, etc.).
*/
int
dsl_prop_unregister(dsl_dataset_t *ds, const char *propname,
dsl_prop_changed_cb_t *callback, void *cbarg)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_prop_cb_record_t *cbr;
mutex_enter(&dd->dd_lock);
for (cbr = list_head(&ds->ds_prop_cbs);
cbr; cbr = list_next(&ds->ds_prop_cbs, cbr)) {
if (cbr->cbr_ds == ds &&
cbr->cbr_func == callback &&
cbr->cbr_arg == cbarg &&
strcmp(cbr->cbr_pr->pr_propname, propname) == 0)
break;
}
if (cbr == NULL) {
mutex_exit(&dd->dd_lock);
return (SET_ERROR(ENOMSG));
}
list_remove(&ds->ds_prop_cbs, cbr);
list_remove(&cbr->cbr_pr->pr_cbs, cbr);
mutex_exit(&dd->dd_lock);
kmem_free(cbr, sizeof (dsl_prop_cb_record_t));
return (0);
}
/*
* Unregister all callbacks that are registered with the
* given callback argument.
*/
void
dsl_prop_unregister_all(dsl_dataset_t *ds, void *cbarg)
{
dsl_prop_cb_record_t *cbr, *next_cbr;
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
next_cbr = list_head(&ds->ds_prop_cbs);
while (next_cbr != NULL) {
cbr = next_cbr;
next_cbr = list_next(&ds->ds_prop_cbs, cbr);
if (cbr->cbr_arg == cbarg) {
list_remove(&ds->ds_prop_cbs, cbr);
list_remove(&cbr->cbr_pr->pr_cbs, cbr);
kmem_free(cbr, sizeof (dsl_prop_cb_record_t));
}
}
mutex_exit(&dd->dd_lock);
}
boolean_t
dsl_prop_hascb(dsl_dataset_t *ds)
{
return (!list_is_empty(&ds->ds_prop_cbs));
}
/* ARGSUSED */
static int
dsl_prop_notify_all_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
mutex_enter(&dd->dd_lock);
for (pr = list_head(&dd->dd_props);
pr; pr = list_next(&dd->dd_props, pr)) {
for (cbr = list_head(&pr->pr_cbs); cbr;
cbr = list_next(&pr->pr_cbs, cbr)) {
uint64_t value;
/*
* Callback entries do not have holds on their
* datasets so that datasets with registered
* callbacks are still eligible for eviction.
* Unlike operations to update properties on a
* single dataset, we are performing a recursive
* descent of related head datasets. The caller
* of this function only has a dataset hold on
* the passed in head dataset, not the snapshots
* associated with this dataset. Without a hold,
* the dataset pointer within callback records
* for snapshots can be invalidated by eviction
* at any time.
*
* Use dsl_dataset_try_add_ref() to verify
* that the dataset for a snapshot has not
* begun eviction processing and to prevent
* eviction from occurring for the duration of
* the callback. If the hold attempt fails,
* this object is already being evicted and the
* callback can be safely ignored.
*/
if (ds != cbr->cbr_ds &&
!dsl_dataset_try_add_ref(dp, cbr->cbr_ds, FTAG))
continue;
if (dsl_prop_get_ds(cbr->cbr_ds,
cbr->cbr_pr->pr_propname, sizeof (value), 1,
&value, NULL) == 0)
cbr->cbr_func(cbr->cbr_arg, value);
if (ds != cbr->cbr_ds)
dsl_dataset_rele(cbr->cbr_ds, FTAG);
}
}
mutex_exit(&dd->dd_lock);
return (0);
}
/*
* Update all property values for ddobj & its descendants. This is used
* when renaming the dir.
*/
void
dsl_prop_notify_all(dsl_dir_t *dd)
{
dsl_pool_t *dp = dd->dd_pool;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
(void) dmu_objset_find_dp(dp, dd->dd_object, dsl_prop_notify_all_cb,
NULL, DS_FIND_CHILDREN);
}
static void
dsl_prop_changed_notify(dsl_pool_t *dp, uint64_t ddobj,
const char *propname, uint64_t value, int first)
{
dsl_dir_t *dd;
dsl_prop_record_t *pr;
dsl_prop_cb_record_t *cbr;
objset_t *mos = dp->dp_meta_objset;
zap_cursor_t zc;
zap_attribute_t *za;
int err;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
err = dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd);
if (err)
return;
if (!first) {
/*
* If the prop is set here, then this change is not
* being inherited here or below; stop the recursion.
*/
err = zap_contains(mos, dsl_dir_phys(dd)->dd_props_zapobj,
propname);
if (err == 0) {
dsl_dir_rele(dd, FTAG);
return;
}
ASSERT3U(err, ==, ENOENT);
}
mutex_enter(&dd->dd_lock);
pr = dsl_prop_record_find(dd, propname);
if (pr != NULL) {
for (cbr = list_head(&pr->pr_cbs); cbr;
cbr = list_next(&pr->pr_cbs, cbr)) {
uint64_t propobj;
/*
* cbr->cbr_ds may be invalidated due to eviction,
* requiring the use of dsl_dataset_try_add_ref().
* See comment block in dsl_prop_notify_all_cb()
* for details.
*/
if (!dsl_dataset_try_add_ref(dp, cbr->cbr_ds, FTAG))
continue;
propobj = dsl_dataset_phys(cbr->cbr_ds)->ds_props_obj;
/*
* If the property is not set on this ds, then it is
* inherited here; call the callback.
*/
if (propobj == 0 ||
zap_contains(mos, propobj, propname) != 0)
cbr->cbr_func(cbr->cbr_arg, value);
dsl_dataset_rele(cbr->cbr_ds, FTAG);
}
}
mutex_exit(&dd->dd_lock);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
for (zap_cursor_init(&zc, mos,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, za) == 0;
zap_cursor_advance(&zc)) {
dsl_prop_changed_notify(dp, za->za_first_integer,
propname, value, FALSE);
}
kmem_free(za, sizeof (zap_attribute_t));
zap_cursor_fini(&zc);
dsl_dir_rele(dd, FTAG);
}
void
dsl_prop_set_sync_impl(dsl_dataset_t *ds, const char *propname,
zprop_source_t source, int intsz, int numints, const void *value,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t zapobj, intval, dummy, count;
int isint;
char valbuf[32];
const char *valstr = NULL;
char *inheritstr;
char *recvdstr;
char *tbuf = NULL;
int err;
uint64_t version = spa_version(ds->ds_dir->dd_pool->dp_spa);
isint = (dodefault(zfs_name_to_prop(propname), 8, 1, &intval) == 0);
if (ds->ds_is_snapshot) {
ASSERT(version >= SPA_VERSION_SNAP_PROPS);
if (dsl_dataset_phys(ds)->ds_props_obj == 0 &&
(source & ZPROP_SRC_NONE) == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_props_obj =
zap_create(mos,
DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
}
zapobj = dsl_dataset_phys(ds)->ds_props_obj;
} else {
zapobj = dsl_dir_phys(ds->ds_dir)->dd_props_zapobj;
}
/* If we are removing objects from a non-existent ZAP just return */
if (zapobj == 0)
return;
if (version < SPA_VERSION_RECVD_PROPS) {
if (source & ZPROP_SRC_NONE)
source = ZPROP_SRC_NONE;
else if (source & ZPROP_SRC_RECEIVED)
source = ZPROP_SRC_LOCAL;
}
inheritstr = kmem_asprintf("%s%s", propname, ZPROP_INHERIT_SUFFIX);
recvdstr = kmem_asprintf("%s%s", propname, ZPROP_RECVD_SUFFIX);
switch ((int)source) {
case ZPROP_SRC_NONE:
/*
* revert to received value, if any (inherit -S)
* - remove propname
* - remove propname$inherit
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
break;
case ZPROP_SRC_LOCAL:
/*
* remove propname$inherit
* set propname -> value
*/
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
VERIFY0(zap_update(mos, zapobj, propname,
intsz, numints, value, tx));
break;
case ZPROP_SRC_INHERITED:
/*
* explicitly inherit
* - remove propname
* - set propname$inherit
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
if (version >= SPA_VERSION_RECVD_PROPS &&
dsl_prop_get_int_ds(ds, ZPROP_HAS_RECVD, &dummy) == 0) {
dummy = 0;
VERIFY0(zap_update(mos, zapobj, inheritstr,
8, 1, &dummy, tx));
}
break;
case ZPROP_SRC_RECEIVED:
/*
* set propname$recvd -> value
*/
err = zap_update(mos, zapobj, recvdstr,
intsz, numints, value, tx);
ASSERT(err == 0);
break;
case (ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED):
/*
* clear local and received settings
* - remove propname
* - remove propname$inherit
* - remove propname$recvd
*/
err = zap_remove(mos, zapobj, propname, tx);
ASSERT(err == 0 || err == ENOENT);
err = zap_remove(mos, zapobj, inheritstr, tx);
ASSERT(err == 0 || err == ENOENT);
- /* FALLTHROUGH */
+ fallthrough;
case (ZPROP_SRC_NONE | ZPROP_SRC_RECEIVED):
/*
* remove propname$recvd
*/
err = zap_remove(mos, zapobj, recvdstr, tx);
ASSERT(err == 0 || err == ENOENT);
break;
default:
cmn_err(CE_PANIC, "unexpected property source: %d", source);
}
kmem_strfree(inheritstr);
kmem_strfree(recvdstr);
/*
* If we are left with an empty snap zap we can destroy it.
* This will prevent unnecessary calls to zap_lookup() in
* the "zfs list" and "zfs get" code paths.
*/
if (ds->ds_is_snapshot &&
zap_count(mos, zapobj, &count) == 0 && count == 0) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_props_obj = 0;
zap_destroy(mos, zapobj, tx);
}
if (isint) {
VERIFY0(dsl_prop_get_int_ds(ds, propname, &intval));
if (ds->ds_is_snapshot) {
dsl_prop_cb_record_t *cbr;
/*
* It's a snapshot; nothing can inherit this
* property, so just look for callbacks on this
* ds here.
*/
mutex_enter(&ds->ds_dir->dd_lock);
for (cbr = list_head(&ds->ds_prop_cbs); cbr;
cbr = list_next(&ds->ds_prop_cbs, cbr)) {
if (strcmp(cbr->cbr_pr->pr_propname,
propname) == 0)
cbr->cbr_func(cbr->cbr_arg, intval);
}
mutex_exit(&ds->ds_dir->dd_lock);
} else {
dsl_prop_changed_notify(ds->ds_dir->dd_pool,
ds->ds_dir->dd_object, propname, intval, TRUE);
}
(void) snprintf(valbuf, sizeof (valbuf),
"%lld", (longlong_t)intval);
valstr = valbuf;
} else {
if (source == ZPROP_SRC_LOCAL) {
valstr = value;
} else {
tbuf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
if (dsl_prop_get_ds(ds, propname, 1,
ZAP_MAXVALUELEN, tbuf, NULL) == 0)
valstr = tbuf;
}
}
spa_history_log_internal_ds(ds, (source == ZPROP_SRC_NONE ||
source == ZPROP_SRC_INHERITED) ? "inherit" : "set", tx,
"%s=%s", propname, (valstr == NULL ? "" : valstr));
if (tbuf != NULL)
kmem_free(tbuf, ZAP_MAXVALUELEN);
}
int
dsl_prop_set_int(const char *dsname, const char *propname,
zprop_source_t source, uint64_t value)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_uint64(nvl, propname, value);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_prop_set_string(const char *dsname, const char *propname,
zprop_source_t source, const char *value)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_string(nvl, propname, value);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_prop_inherit(const char *dsname, const char *propname,
zprop_source_t source)
{
nvlist_t *nvl = fnvlist_alloc();
int error;
fnvlist_add_boolean(nvl, propname);
error = dsl_props_set(dsname, source, nvl);
fnvlist_free(nvl);
return (error);
}
int
dsl_props_set_check(void *arg, dmu_tx_t *tx)
{
dsl_props_set_arg_t *dpsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t version;
nvpair_t *elem = NULL;
int err;
err = dsl_dataset_hold(dp, dpsa->dpsa_dsname, FTAG, &ds);
if (err != 0)
return (err);
version = spa_version(ds->ds_dir->dd_pool->dp_spa);
while ((elem = nvlist_next_nvpair(dpsa->dpsa_props, elem)) != NULL) {
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENAMETOOLONG));
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
char *valstr = fnvpair_value_string(elem);
if (strlen(valstr) >= (version <
SPA_VERSION_STMF_PROP ?
ZAP_OLDMAXVALUELEN : ZAP_MAXVALUELEN)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(E2BIG));
}
}
}
if (ds->ds_is_snapshot && version < SPA_VERSION_SNAP_PROPS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_props_set_sync_impl(dsl_dataset_t *ds, zprop_source_t source,
nvlist_t *props, dmu_tx_t *tx)
{
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
nvpair_t *pair = elem;
const char *name = nvpair_name(pair);
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
/*
* This usually happens when we reuse the nvlist_t data
* returned by the counterpart dsl_prop_get_all_impl().
* For instance we do this to restore the original
* received properties when an error occurs in the
* zfs_ioc_recv() codepath.
*/
nvlist_t *attrs = fnvpair_value_nvlist(pair);
pair = fnvlist_lookup_nvpair(attrs, ZPROP_VALUE);
}
if (nvpair_type(pair) == DATA_TYPE_STRING) {
const char *value = fnvpair_value_string(pair);
dsl_prop_set_sync_impl(ds, name,
source, 1, strlen(value) + 1, value, tx);
} else if (nvpair_type(pair) == DATA_TYPE_UINT64) {
uint64_t intval = fnvpair_value_uint64(pair);
dsl_prop_set_sync_impl(ds, name,
source, sizeof (intval), 1, &intval, tx);
} else if (nvpair_type(pair) == DATA_TYPE_BOOLEAN) {
dsl_prop_set_sync_impl(ds, name,
source, 0, 0, NULL, tx);
} else {
panic("invalid nvpair type");
}
}
}
void
dsl_props_set_sync(void *arg, dmu_tx_t *tx)
{
dsl_props_set_arg_t *dpsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold(dp, dpsa->dpsa_dsname, FTAG, &ds));
dsl_props_set_sync_impl(ds, dpsa->dpsa_source, dpsa->dpsa_props, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* All-or-nothing; if any prop can't be set, nothing will be modified.
*/
int
dsl_props_set(const char *dsname, zprop_source_t source, nvlist_t *props)
{
dsl_props_set_arg_t dpsa;
int nblks = 0;
dpsa.dpsa_dsname = dsname;
dpsa.dpsa_source = source;
dpsa.dpsa_props = props;
/*
* If the source includes NONE, then we will only be removing entries
* from the ZAP object. In that case don't check for ENOSPC.
*/
if ((source & ZPROP_SRC_NONE) == 0)
nblks = 2 * fnvlist_num_pairs(props);
return (dsl_sync_task(dsname, dsl_props_set_check, dsl_props_set_sync,
&dpsa, nblks, ZFS_SPACE_CHECK_RESERVED));
}
typedef enum dsl_prop_getflags {
DSL_PROP_GET_INHERITING = 0x1, /* searching parent of target ds */
DSL_PROP_GET_SNAPSHOT = 0x2, /* snapshot dataset */
DSL_PROP_GET_LOCAL = 0x4, /* local properties */
DSL_PROP_GET_RECEIVED = 0x8, /* received properties */
} dsl_prop_getflags_t;
static int
dsl_prop_get_all_impl(objset_t *mos, uint64_t propobj,
const char *setpoint, dsl_prop_getflags_t flags, nvlist_t *nv)
{
zap_cursor_t zc;
zap_attribute_t za;
int err = 0;
for (zap_cursor_init(&zc, mos, propobj);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
nvlist_t *propval;
zfs_prop_t prop;
char buf[ZAP_MAXNAMELEN];
char *valstr;
const char *suffix;
const char *propname;
const char *source;
suffix = strchr(za.za_name, '$');
if (suffix == NULL) {
/*
* Skip local properties if we only want received
* properties.
*/
if (flags & DSL_PROP_GET_RECEIVED)
continue;
propname = za.za_name;
source = setpoint;
} else if (strcmp(suffix, ZPROP_INHERIT_SUFFIX) == 0) {
/* Skip explicitly inherited entries. */
continue;
} else if (strcmp(suffix, ZPROP_RECVD_SUFFIX) == 0) {
if (flags & DSL_PROP_GET_LOCAL)
continue;
(void) strncpy(buf, za.za_name, (suffix - za.za_name));
buf[suffix - za.za_name] = '\0';
propname = buf;
if (!(flags & DSL_PROP_GET_RECEIVED)) {
/* Skip if locally overridden. */
err = zap_contains(mos, propobj, propname);
if (err == 0)
continue;
if (err != ENOENT)
break;
/* Skip if explicitly inherited. */
valstr = kmem_asprintf("%s%s", propname,
ZPROP_INHERIT_SUFFIX);
err = zap_contains(mos, propobj, valstr);
kmem_strfree(valstr);
if (err == 0)
continue;
if (err != ENOENT)
break;
}
source = ((flags & DSL_PROP_GET_INHERITING) ?
setpoint : ZPROP_SOURCE_VAL_RECVD);
} else {
/*
* For backward compatibility, skip suffixes we don't
* recognize.
*/
continue;
}
prop = zfs_name_to_prop(propname);
/* Skip non-inheritable properties. */
if ((flags & DSL_PROP_GET_INHERITING) && prop != ZPROP_INVAL &&
!zfs_prop_inheritable(prop))
continue;
/* Skip properties not valid for this type. */
if ((flags & DSL_PROP_GET_SNAPSHOT) && prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, ZFS_TYPE_SNAPSHOT, B_FALSE))
continue;
/* Skip properties already defined. */
if (nvlist_exists(nv, propname))
continue;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (za.za_integer_length == 1) {
/*
* String property
*/
char *tmp = kmem_alloc(za.za_num_integers,
KM_SLEEP);
err = zap_lookup(mos, propobj,
za.za_name, 1, za.za_num_integers, tmp);
if (err != 0) {
kmem_free(tmp, za.za_num_integers);
break;
}
VERIFY(nvlist_add_string(propval, ZPROP_VALUE,
tmp) == 0);
kmem_free(tmp, za.za_num_integers);
} else {
/*
* Integer property
*/
ASSERT(za.za_integer_length == 8);
(void) nvlist_add_uint64(propval, ZPROP_VALUE,
za.za_first_integer);
}
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, source) == 0);
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
zap_cursor_fini(&zc);
if (err == ENOENT)
err = 0;
return (err);
}
/*
* Iterate over all properties for this dataset and return them in an nvlist.
*/
static int
dsl_prop_get_all_ds(dsl_dataset_t *ds, nvlist_t **nvp,
dsl_prop_getflags_t flags)
{
dsl_dir_t *dd = ds->ds_dir;
dsl_pool_t *dp = dd->dd_pool;
objset_t *mos = dp->dp_meta_objset;
int err = 0;
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
VERIFY(nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (ds->ds_is_snapshot)
flags |= DSL_PROP_GET_SNAPSHOT;
ASSERT(dsl_pool_config_held(dp));
if (dsl_dataset_phys(ds)->ds_props_obj != 0) {
ASSERT(flags & DSL_PROP_GET_SNAPSHOT);
dsl_dataset_name(ds, setpoint);
err = dsl_prop_get_all_impl(mos,
dsl_dataset_phys(ds)->ds_props_obj, setpoint, flags, *nvp);
if (err)
goto out;
}
for (; dd != NULL; dd = dd->dd_parent) {
if (dd != ds->ds_dir || (flags & DSL_PROP_GET_SNAPSHOT)) {
if (flags & (DSL_PROP_GET_LOCAL |
DSL_PROP_GET_RECEIVED))
break;
flags |= DSL_PROP_GET_INHERITING;
}
dsl_dir_name(dd, setpoint);
err = dsl_prop_get_all_impl(mos,
dsl_dir_phys(dd)->dd_props_zapobj, setpoint, flags, *nvp);
if (err)
break;
}
out:
if (err) {
nvlist_free(*nvp);
*nvp = NULL;
}
return (err);
}
boolean_t
dsl_prop_get_hasrecvd(const char *dsname)
{
uint64_t dummy;
return (0 ==
dsl_prop_get_integer(dsname, ZPROP_HAS_RECVD, &dummy, NULL));
}
static int
dsl_prop_set_hasrecvd_impl(const char *dsname, zprop_source_t source)
{
uint64_t version;
spa_t *spa;
int error = 0;
VERIFY0(spa_open(dsname, &spa, FTAG));
version = spa_version(spa);
spa_close(spa, FTAG);
if (version >= SPA_VERSION_RECVD_PROPS)
error = dsl_prop_set_int(dsname, ZPROP_HAS_RECVD, source, 0);
return (error);
}
/*
* Call after successfully receiving properties to ensure that only the first
* receive on or after SPA_VERSION_RECVD_PROPS blows away local properties.
*/
int
dsl_prop_set_hasrecvd(const char *dsname)
{
int error = 0;
if (!dsl_prop_get_hasrecvd(dsname))
error = dsl_prop_set_hasrecvd_impl(dsname, ZPROP_SRC_LOCAL);
return (error);
}
void
dsl_prop_unset_hasrecvd(const char *dsname)
{
VERIFY0(dsl_prop_set_hasrecvd_impl(dsname, ZPROP_SRC_NONE));
}
int
dsl_prop_get_all(objset_t *os, nvlist_t **nvp)
{
return (dsl_prop_get_all_ds(os->os_dsl_dataset, nvp, 0));
}
int
dsl_prop_get_received(const char *dsname, nvlist_t **nvp)
{
objset_t *os;
int error;
/*
* Received properties are not distinguishable from local properties
* until the dataset has received properties on or after
* SPA_VERSION_RECVD_PROPS.
*/
dsl_prop_getflags_t flags = (dsl_prop_get_hasrecvd(dsname) ?
DSL_PROP_GET_RECEIVED : DSL_PROP_GET_LOCAL);
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = dsl_prop_get_all_ds(os->os_dsl_dataset, nvp, flags);
dmu_objset_rele(os, FTAG);
return (error);
}
void
dsl_prop_nvlist_add_uint64(nvlist_t *nv, zfs_prop_t prop, uint64_t value)
{
nvlist_t *propval;
const char *propname = zfs_prop_to_name(prop);
uint64_t default_value;
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
return;
}
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, value) == 0);
/* Indicate the default source if we can. */
if (dodefault(prop, 8, 1, &default_value) == 0 &&
value == default_value) {
VERIFY(nvlist_add_string(propval, ZPROP_SOURCE, "") == 0);
}
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
void
dsl_prop_nvlist_add_string(nvlist_t *nv, zfs_prop_t prop, const char *value)
{
nvlist_t *propval;
const char *propname = zfs_prop_to_name(prop);
if (nvlist_lookup_nvlist(nv, propname, &propval) == 0) {
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
return;
}
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, value) == 0);
VERIFY(nvlist_add_nvlist(nv, propname, propval) == 0);
nvlist_free(propval);
}
#if defined(_KERNEL)
EXPORT_SYMBOL(dsl_prop_register);
EXPORT_SYMBOL(dsl_prop_unregister);
EXPORT_SYMBOL(dsl_prop_unregister_all);
EXPORT_SYMBOL(dsl_prop_get);
EXPORT_SYMBOL(dsl_prop_get_integer);
EXPORT_SYMBOL(dsl_prop_get_all);
EXPORT_SYMBOL(dsl_prop_get_received);
EXPORT_SYMBOL(dsl_prop_get_ds);
EXPORT_SYMBOL(dsl_prop_get_int_ds);
EXPORT_SYMBOL(dsl_prop_get_dd);
EXPORT_SYMBOL(dsl_props_set);
EXPORT_SYMBOL(dsl_prop_set_int);
EXPORT_SYMBOL(dsl_prop_set_string);
EXPORT_SYMBOL(dsl_prop_inherit);
EXPORT_SYMBOL(dsl_prop_predict);
EXPORT_SYMBOL(dsl_prop_nvlist_add_uint64);
EXPORT_SYMBOL(dsl_prop_nvlist_add_string);
#endif
diff --git a/sys/contrib/openzfs/module/zfs/spa.c b/sys/contrib/openzfs/module/zfs/spa.c
index 55870bee47fb..a02fd198bed0 100644
--- a/sys/contrib/openzfs/module/zfs/spa.c
+++ b/sys/contrib/openzfs/module/zfs/spa.c
@@ -1,9951 +1,9951 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2018, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2018 Joyent, Inc.
* Copyright (c) 2017, 2019, Datto Inc. All rights reserved.
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_removal.h>
#include <sys/vdev_indirect_mapping.h>
#include <sys/vdev_indirect_births.h>
#include <sys/vdev_initialize.h>
#include <sys/vdev_rebuild.h>
#include <sys/vdev_trim.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_draid.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/callb.h>
#include <sys/zone.h>
#include <sys/vmsystm.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
ZTI_MODE_SCALE, /* Taskqs scale with CPUs. */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_SCALE { ZTI_MODE_SCALE, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random. ZTI_SCALE is similar to ZTI_BATCH,
* but with number of taskqs also scaling with number of CPUs.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
* need to be handled with minimum delay.
*/
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_SCALE, ZTI_NULL }, /* READ */
{ ZTI_BATCH, ZTI_N(5), ZTI_SCALE, ZTI_N(5) }, /* WRITE */
{ ZTI_SCALE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
{ ZTI_N(4), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* TRIM */
};
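/*
 * For illustration, reading one row of the table above: for READ zios
 * the ISSUE entry ZTI_N(8) creates one taskq with eight threads, the
 * INTR entry ZTI_SCALE creates taskqs whose thread and taskq counts
 * scale with the number of CPUs, and the two ZTI_NULL entries mean no
 * separate high-priority (ISSUE_HIGH/INTR_HIGH) taskqs are created
 * for reads.
 */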
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static int spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
uint_t zio_taskq_batch_pct = 80; /* 1 thread per cpu in pset */
uint_t zio_taskq_batch_tpq; /* threads per taskq */
boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
uint_t zio_taskq_basedc = 80; /* base duty cycle */
boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
/*
* Report any spa_load_verify errors found, but do not fail spa_load.
* This is used by zdb to analyze non-idle pools.
*/
boolean_t spa_load_verify_dryrun = B_FALSE;
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* For debugging purposes: print out vdev tree during pool import.
*/
int spa_load_print_vdev_tree = B_FALSE;
/*
* A non-zero value for zfs_max_missing_tvds means that we allow importing
* pools with missing top-level vdevs. This is strictly intended for advanced
* pool recovery cases since missing data is almost inevitable. Pools with
* missing devices can only be imported read-only for safety reasons, and their
* fail-mode will be automatically set to "continue".
*
* With 1 missing vdev we should be able to import the pool and mount all
* datasets. User data that was not modified after the missing device has been
* added should be recoverable. This means that snapshots created prior to the
* addition of that device should be completely intact.
*
* With 2 missing vdevs, some datasets may fail to mount since there are
* dataset statistics that are stored as regular metadata. Some data might be
* recoverable if those vdevs were added recently.
*
* With 3 or more missing vdevs, the pool is severely damaged and MOS entries
* may be missing entirely. Chances of data recovery are very low. Note that
* there are also risks of performing an inadvertent rewind as we might be
* missing all the vdevs with the latest uberblocks.
*/
unsigned long zfs_max_missing_tvds = 0;
/*
* The parameters below are similar to zfs_max_missing_tvds but are only
* intended for a preliminary open of the pool with an untrusted config which
* might be incomplete or out-dated.
*
* We are more tolerant for pools opened from a cachefile since we could have
* an out-dated cachefile where a device removal was not registered.
* We could have set the limit arbitrarily high but in the case where devices
* are really missing we would want to return the proper error codes; we chose
* SPA_DVAS_PER_BP - 1 so that some copies of the MOS would still be available
* and we get a chance to retrieve the trusted config.
*/
uint64_t zfs_max_missing_tvds_cachefile = SPA_DVAS_PER_BP - 1;
/*
* In the case where config was assembled by scanning device paths (/dev/dsks
* by default) we are less tolerant since all the existing devices should have
* been detected and we want spa_load to return the right error codes.
*/
uint64_t zfs_max_missing_tvds_scan = 0;
/*
* Debugging aid that pauses spa_sync() towards the end.
*/
boolean_t zfs_pause_spa_sync = B_FALSE;
/*
* Variables to indicate the livelist condense zthr func should wait at certain
* points for the livelist to be removed - used to test condense/destroy races
*/
int zfs_livelist_condense_zthr_pause = 0;
int zfs_livelist_condense_sync_pause = 0;
/*
* Variables to track whether or not condense cancellation has been
* triggered in testing.
*/
int zfs_livelist_condense_sync_cancel = 0;
int zfs_livelist_condense_zthr_cancel = 0;
/*
* Variable to track whether or not extra ALLOC blkptrs were added to a
* livelist entry while it was being condensed (caused by the way we track
* remapped blkptrs in dbuf_remap_impl)
*/
int zfs_livelist_condense_new_alloc = 0;
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
if (strval != NULL)
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
else
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
nvlist_free(propval);
}
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(mc);
alloc += metaslab_class_get_alloc(spa_special_class(spa));
alloc += metaslab_class_get_alloc(spa_dedup_class(spa));
alloc += metaslab_class_get_alloc(spa_embedded_log_class(spa));
size = metaslab_class_get_space(mc);
size += metaslab_class_get_space(spa_special_class(spa));
size += metaslab_class_get_space(spa_dedup_class(spa));
size += metaslab_class_get_space(spa_embedded_log_class(spa));
spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_CHECKPOINT, NULL,
spa->spa_checkpoint_info.sci_dspace, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == SPA_MODE_READ), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
spa_prop_add_list(*nvp, ZPOOL_PROP_LOAD_GUID,
NULL, spa_load_guid(spa), src);
}
if (pool != NULL) {
/*
* The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
* when opening pools created before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_compatibility != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMPATIBILITY,
spa->spa_compatibility, 0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
dsl_pool_t *dp;
int err;
err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nvp);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0)
goto out;
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
continue;
switch (za.za_integer_length) {
case 8:
/* integer property */
if (za.za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_dataset_t *ds = NULL;
err = dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &ds);
if (err != 0)
break;
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
} else {
strval = NULL;
intval = za.za_first_integer;
}
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
spa_prop_add_list(*nvp, prop, strval, 0, src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
out:
mutex_exit(&spa->spa_props_lock);
dsl_pool_config_exit(dp, FTAG);
if (err && err != ENOENT) {
nvlist_free(*nvp);
*nvp = NULL;
return (err);
}
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = SET_ERROR(EINVAL);
break;
}
/*
* Sanitize the input.
*/
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
case ZPOOL_PROP_AUTOTRIM:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error) {
uint32_t hostid = zone_get_hostid(NULL);
if (hostid)
spa->spa_hostid = hostid;
else
error = SET_ERROR(ENOTSUP);
}
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error != 0)
break;
/* Must be ZPL. */
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
default:
break;
}
if (error)
break;
}
(void) nvlist_remove_all(props,
zpool_prop_to_name(ZPOOL_PROP_DEDUPDITTO));
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
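/*
 * Map the cachefile property value onto the config dirent: an empty
 * string selects the default cachefile path, "none" disables the
 * cachefile, and any other value is used verbatim as the path.
 */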
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid __maybe_unused = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
int error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (SET_ERROR(error));
}
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
(u_longlong_t)oldguid, (u_longlong_t)*newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*/
int
spa_change_guid(spa_t *spa)
{
int error;
uint64_t guid;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
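/*
 * TREE_ISIGN() normalizes memcmp()'s result to -1, 0, or +1 so it can
 * be used directly as an AVL tree comparator.
 */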
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (TREE_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t cpus, flags = TASKQ_DYNAMIC;
boolean_t batch = B_FALSE;
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >, 0);
break;
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
value = MIN(zio_taskq_batch_pct, 100);
break;
case ZTI_MODE_SCALE:
flags |= TASKQ_THREADS_CPU_PCT;
/*
* We want more taskqs to reduce lock contention, but fewer for
* better request ordering and CPU utilization.
*/
cpus = MAX(1, boot_ncpus * zio_taskq_batch_pct / 100);
if (zio_taskq_batch_tpq > 0) {
count = MAX(1, (cpus + zio_taskq_batch_tpq / 2) /
zio_taskq_batch_tpq);
} else {
/*
* Prefer 6 threads per taskq, but no more taskqs
* than threads in them on large systems. For 80%:
*
* taskq taskq total
* cpus taskqs percent threads threads
* ------- ------- ------- ------- -------
* 1 1 80% 1 1
* 2 1 80% 1 1
* 4 1 80% 3 3
* 8 2 40% 3 6
* 16 3 27% 4 12
* 32 5 16% 5 25
* 64 7 11% 7 49
* 128 10 8% 10 100
* 256 14 6% 15 210
*/
count = 1 + cpus / 6;
while (count * count > cpus)
count--;
}
/* Limit each taskq within 100% to not trigger assertion. */
count = MAX(count, (zio_taskq_batch_pct + 99) / 100);
value = (zio_taskq_batch_pct + count / 2) / count;
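/*
 * Worked example for the 80% case in the table above, with
 * zio_taskq_batch_tpq assumed to be 0, on a 32-CPU system:
 * cpus = 25, count = 1 + 25 / 6 = 5 (5 * 5 <= 25), and
 * value = (80 + 2) / 5 = 16, i.e. five taskqs at 16% each.
 */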
break;
case ZTI_MODE_NULL:
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_activate()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
char name[32];
if (count > 1)
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
else
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
if (batch)
flags |= TASKQ_DC_BATCH;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs.
*
* Under Linux and FreeBSD this means incrementing
* the priority value as opposed to platforms like
* illumos where it should be decremented.
*
* On FreeBSD, if priorities divided by four (RQ_PPQ)
* are equal then a difference between them is
* insignificant.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE) {
#if defined(__linux__)
pri++;
#elif defined(__FreeBSD__)
pri += 4;
#else
#error "unknown OS"
#endif
}
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
}
tqs->stqs_taskq[i] = tq;
}
}
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
* on the taskq itself. In that case we choose a taskq at random using the
* low bits of gethrtime().
*/
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, arg, flags, ent);
}
/*
* Same as spa_taskq_dispatch_ent() but block on the task until completion.
*/
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
taskqid_t id;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
id = taskq_dispatch(tq, func, arg, flags);
if (id)
taskq_wait_id(tq, id);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
/*
* Disabled until spa_thread() can be adapted for Linux.
*/
#undef HAVE_SPA_THREAD
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
psetid_t zio_taskq_psrset_bind = PS_NONE;
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, spa_mode_t mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_embedded_log_class =
metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_special_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_dedup_class = metaslab_class_create(spa, zfs_metaslab_ops);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
for (size_t i = 0; i < TXG_SIZE; i++) {
spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
}
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks.
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool allows one to keep the pools independent.
* This way if one pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", 100,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC | TASKQ_THREADS_CPU_PCT);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
for (size_t i = 0; i < TXG_SIZE; i++) {
ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
VERIFY0(zio_wait(spa->spa_txg_zio[i]));
spa->spa_txg_zio[i] = NULL;
}
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
metaslab_class_destroy(spa->spa_embedded_log_class);
spa->spa_embedded_log_class = NULL;
metaslab_class_destroy(spa->spa_special_class);
spa->spa_special_class = NULL;
metaslab_class_destroy(spa->spa_dedup_class);
spa->spa_dedup_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
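/*
 * Decide whether to spend time flushing metaslabs on unload: only for
 * writeable, actively syncing pools that are being exported with the
 * log spacemap feature active, and only if the
 * zfs_keep_log_spacemaps_at_export tunable is not set.
 */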
static boolean_t
spa_should_flush_logs_on_unload(spa_t *spa)
{
if (!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))
return (B_FALSE);
if (!spa_writeable(spa))
return (B_FALSE);
if (!spa->spa_sync_on)
return (B_FALSE);
if (spa_state(spa) != POOL_STATE_EXPORTED)
return (B_FALSE);
if (zfs_keep_log_spacemaps_at_export)
return (B_FALSE);
return (B_TRUE);
}
/*
* Opens a transaction that will set the flag that will instruct
* spa_sync to attempt to flush all the metaslabs for that txg.
*/
static void
spa_unload_log_sm_flush_all(spa_t *spa)
{
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
ASSERT3U(spa->spa_log_flushall_txg, ==, 0);
spa->spa_log_flushall_txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), spa->spa_log_flushall_txg);
}
static void
spa_unload_log_sm_metadata(spa_t *spa)
{
void *cookie = NULL;
spa_log_sm_t *sls;
while ((sls = avl_destroy_nodes(&spa->spa_sm_logs_by_txg,
&cookie)) != NULL) {
VERIFY0(sls->sls_mscount);
kmem_free(sls, sizeof (spa_log_sm_t));
}
for (log_summary_entry_t *e = list_head(&spa->spa_log_summary);
e != NULL; e = list_head(&spa->spa_log_summary)) {
VERIFY0(e->lse_mscount);
list_remove(&spa->spa_log_summary, e);
kmem_free(e, sizeof (log_summary_entry_t));
}
spa->spa_unflushed_stats.sus_nblocks = 0;
spa->spa_unflushed_stats.sus_memused = 0;
spa->spa_unflushed_stats.sus_blocklimit = 0;
}
static void
spa_destroy_aux_threads(spa_t *spa)
{
if (spa->spa_condense_zthr != NULL) {
zthr_destroy(spa->spa_condense_zthr);
spa->spa_condense_zthr = NULL;
}
if (spa->spa_checkpoint_discard_zthr != NULL) {
zthr_destroy(spa->spa_checkpoint_discard_zthr);
spa->spa_checkpoint_discard_zthr = NULL;
}
if (spa->spa_livelist_delete_zthr != NULL) {
zthr_destroy(spa->spa_livelist_delete_zthr);
spa->spa_livelist_delete_zthr = NULL;
}
if (spa->spa_livelist_condense_zthr != NULL) {
zthr_destroy(spa->spa_livelist_condense_zthr);
spa->spa_livelist_condense_zthr = NULL;
}
}
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa_state(spa) != POOL_STATE_UNINITIALIZED);
spa_import_progress_remove(spa_guid(spa));
spa_load_note(spa, "UNLOADING");
spa_wake_waiters(spa);
/*
* If the log space map feature is enabled and the pool is getting
* exported (but not destroyed), we want to spend some time flushing
* as many metaslabs as we can in an attempt to destroy log space
* maps and save import time.
*/
if (spa_should_flush_logs_on_unload(spa))
spa_unload_log_sm_flush_all(spa);
/*
* Stop async tasks.
*/
spa_async_suspend(spa);
if (spa->spa_root_vdev) {
vdev_t *root_vdev = spa->spa_root_vdev;
vdev_initialize_stop_all(root_vdev, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(root_vdev, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* This ensures that there is no async metaslab prefetching
* while we attempt to unload the spa.
*/
if (spa->spa_root_vdev != NULL) {
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++) {
vdev_t *vc = spa->spa_root_vdev->vdev_child[c];
if (vc->vdev_mg != NULL)
taskq_wait(vc->vdev_mg->mg_taskq);
}
}
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
if (spa->spa_vdev_removal != NULL) {
spa_vdev_removal_destroy(spa->spa_vdev_removal);
spa->spa_vdev_removal = NULL;
}
spa_destroy_aux_threads(spa);
spa_condense_fini(spa);
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
spa_unload_log_sm_metadata(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
if (spa->spa_spares.sav_vdevs) {
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
for (int i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
if (spa->spa_l2cache.sav_vdevs) {
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
spa->spa_indirect_vdevs_loaded = B_FALSE;
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
if (spa->spa_compatibility != NULL) {
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = NULL;
}
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As spare vdevs are shared among open pools, we skip loading
* them when we load the checkpointed state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
if (spa->spa_spares.sav_vdevs)
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
* process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
* behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
DATA_TYPE_NVLIST_ARRAY) == 0);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
#ifndef _KERNEL
/*
* zdb opens both the current state of the pool and the
* checkpointed state (if present), with a different spa_t.
*
* As L2 caches are part of the ARC which is shared among open
* pools, we skip loading them when we load the checkpointed
* state of the pool.
*/
if (!spa_writeable(spa))
return;
#endif
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
&guid) == 0);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
/*
* Upon cache device addition to a pool or pool
* creation with a cache device, or if the header
* of the device is invalid, we issue an async
* TRIM command for the whole device, which will
* execute if l2arc_trim_ahead > 0.
*/
spa_async_request(spa, SPA_ASYNC_L2CACHE_TRIM);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
DATA_TYPE_NVLIST_ARRAY) == 0);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
/*
* Purge vdevs that were dropped
*/
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
if (oldvdevs)
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
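/*
 * Read a packed nvlist stored in the MOS: the object's bonus buffer
 * holds the packed size, the object data holds the packed bytes, and
 * the result is unpacked into *value.
 */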
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
/*
* Concrete top-level vdevs that are not missing and are not logs. At every
* spa_sync we write new uberblocks to at least SPA_SYNC_MIN_VDEVS core tvds.
*/
static uint64_t
spa_healthy_core_tvds(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t tvds = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *vd = rvd->vdev_child[i];
if (vd->vdev_islog)
continue;
if (vdev_is_concrete(vd) && !vdev_is_dead(vd))
tvds++;
}
return (tvds);
}
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (uint64_t c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
static int
spa_check_for_missing_logs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing log devices.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
/*
* We consider a device as missing only if it failed
* to open (i.e., an offline or faulted device is not
* considered missing).
*/
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
child[idx++] = vdev_config_generate(spa, tvd,
B_FALSE, VDEV_CONFIG_MISSING);
}
}
if (idx > 0) {
fnvlist_add_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, child, idx);
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv);
for (uint64_t i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
if (idx > 0) {
spa_load_failed(spa, "some log devices are missing");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
} else {
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog &&
tvd->vdev_state == VDEV_STATE_CANT_OPEN) {
spa_set_log_state(spa, SPA_LOG_CLEAR);
spa_load_note(spa, "some log devices are "
"missing, ZIL is dropped.");
vdev_dbgmsg_print_tree(rvd, 2);
break;
}
}
}
return (0);
}
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
/*
* Passivate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_passivate(tvd->vdev_mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
/*
* Activate any log vdevs (note, does not apply to embedded log metaslabs).
*/
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_islog) {
ASSERT3P(tvd->vdev_log_mg, ==, NULL);
metaslab_group_activate(tvd->vdev_mg);
}
}
}
int
spa_reset_logs(spa_t *spa)
{
int error;
error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device, sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
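/*
 * Record the highest block birth txg seen among claimed blocks in
 * spa_claim_max_txg.
 */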
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
spa->spa_claim_max_txg = zio->io_bp->blk_birth;
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* Maximum number of inflight bytes is the log2 fraction of the arc size.
* By default, we set it to 1/16th of the arc.
*/
int spa_load_verify_shift = 4;
int spa_load_verify_metadata = B_TRUE;
int spa_load_verify_data = B_TRUE;
/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
if (zb->zb_level == ZB_DNODE_LEVEL || BP_IS_HOLE(bp) ||
BP_IS_EMBEDDED(bp) || BP_IS_REDACTED(bp))
return (0);
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
return (0);
uint64_t maxinflight_bytes =
arc_target_bytes() >> spa_load_verify_shift;
zio_t *rio = arg;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_bytes >= maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_bytes += size;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
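/*
 * dmu_objset_find_dp() callback: fail verification with ENAMETOOLONG if
 * any dataset name would not fit in ZFS_MAX_DATASET_NAME_LEN.
 */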
/* ARGSUSED */
static int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_load_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_rewind & ZPOOL_NEVER_REWIND)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
if (spa->spa_extreme_rewind) {
spa_load_note(spa, "performing a complete scan of the "
"pool since extreme rewind is on. This may take "
"a very long time.\n (spa_load_verify_data=%u, "
"spa_load_verify_metadata=%u)",
spa_load_verify_data, spa_load_verify_metadata);
}
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
ASSERT0(spa->spa_load_verify_bytes);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (sle.sle_meta_count != 0 || sle.sle_data_count != 0) {
spa_load_note(spa, "spa_load_verify found %llu metadata errors "
"and %llu data errors", (u_longlong_t)sle.sle_meta_count,
(u_longlong_t)sle.sle_data_count);
}
if (spa_load_verify_dryrun ||
(!error && sle.sle_meta_count <= policy.zlp_maxmeta &&
sle.sle_data_count <= policy.zlp_maxdata)) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
VERIFY(nvlist_add_int64(spa->spa_load_info,
ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (spa_load_verify_dryrun)
return (0);
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val, boolean_t log_enoent)
{
int error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val);
if (error != 0 && (error != ENOENT || log_enoent)) {
spa_load_failed(spa, "couldn't get '%s' value in MOS directory "
"[error=%d]", name, error);
}
return (error);
}
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
return (SET_ERROR(err));
}
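/*
 * Returns B_TRUE while there is at least one deleted clone whose
 * livelist still needs to be freed.
 */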
boolean_t
spa_livelist_delete_check(spa_t *spa)
{
return (spa->spa_livelists_to_delete != 0);
}
/* ARGSUSED */
static boolean_t
spa_livelist_delete_cb_check(void *arg, zthr_t *z)
{
spa_t *spa = arg;
return (spa_livelist_delete_check(spa));
}
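/*
 * bplist_iterate() callback used during livelist deletion: free the
 * block pointer and subtract the freed space from the $FREE dsl_dir's
 * accounting.
 */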
static int
delete_blkptr_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
spa_t *spa = arg;
zio_free(spa, tx->tx_txg, bp);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
return (0);
}
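/*
 * Retrieve the livelist object number stored in the first entry
 * returned by a ZAP cursor over the deleted-clones ZAP; returns an
 * error if the ZAP is empty.
 */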
static int
dsl_get_next_livelist_obj(objset_t *os, uint64_t zap_obj, uint64_t *llp)
{
int err;
zap_cursor_t zc;
zap_attribute_t za;
zap_cursor_init(&zc, os, zap_obj);
err = zap_cursor_retrieve(&zc, &za);
zap_cursor_fini(&zc);
if (err == 0)
*llp = za.za_first_integer;
return (err);
}
/*
* Components of livelist deletion that must be performed in syncing
* context: freeing block pointers and updating the pool-wide data
* structures to indicate how much work is left to do
*/
typedef struct sublist_delete_arg {
spa_t *spa;
dsl_deadlist_t *ll;
uint64_t key;
bplist_t *to_free;
} sublist_delete_arg_t;
static void
sublist_delete_sync(void *arg, dmu_tx_t *tx)
{
sublist_delete_arg_t *sda = arg;
spa_t *spa = sda->spa;
dsl_deadlist_t *ll = sda->ll;
uint64_t key = sda->key;
bplist_t *to_free = sda->to_free;
bplist_iterate(to_free, delete_blkptr_cb, spa, tx);
dsl_deadlist_remove_entry(ll, key, tx);
}
typedef struct livelist_delete_arg {
spa_t *spa;
uint64_t ll_obj;
uint64_t zap_obj;
} livelist_delete_arg_t;
static void
livelist_delete_sync(void *arg, dmu_tx_t *tx)
{
livelist_delete_arg_t *lda = arg;
spa_t *spa = lda->spa;
uint64_t ll_obj = lda->ll_obj;
uint64_t zap_obj = lda->zap_obj;
objset_t *mos = spa->spa_meta_objset;
uint64_t count;
/* free the livelist and decrement the feature count */
VERIFY0(zap_remove_int(mos, zap_obj, ll_obj, tx));
dsl_deadlist_free(mos, ll_obj, tx);
spa_feature_decr(spa, SPA_FEATURE_LIVELIST, tx);
VERIFY0(zap_count(mos, zap_obj, &count));
if (count == 0) {
/* no more livelists to delete */
VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DELETED_CLONES, tx));
VERIFY0(zap_destroy(mos, zap_obj, tx));
spa->spa_livelists_to_delete = 0;
spa_notify_waiters(spa);
}
}
/*
* Load the value for the livelist to be removed and open it. Then load
* its first sublist and determine which block pointers should actually
* be freed. Finally, call a synctask that performs the actual frees and
* updates the pool-wide livelist data.
*/
/* ARGSUSED */
static void
spa_livelist_delete_cb(void *arg, zthr_t *z)
{
spa_t *spa = arg;
uint64_t ll_obj = 0, count;
objset_t *mos = spa->spa_meta_objset;
uint64_t zap_obj = spa->spa_livelists_to_delete;
/*
* Determine the next livelist to delete. This function should only
* be called if there is at least one deleted clone.
*/
VERIFY0(dsl_get_next_livelist_obj(mos, zap_obj, &ll_obj));
VERIFY0(zap_count(mos, ll_obj, &count));
if (count > 0) {
dsl_deadlist_t *ll;
dsl_deadlist_entry_t *dle;
bplist_t to_free;
ll = kmem_zalloc(sizeof (dsl_deadlist_t), KM_SLEEP);
dsl_deadlist_open(ll, mos, ll_obj);
dle = dsl_deadlist_first(ll);
ASSERT3P(dle, !=, NULL);
bplist_create(&to_free);
int err = dsl_process_sub_livelist(&dle->dle_bpobj, &to_free,
z, NULL);
if (err == 0) {
sublist_delete_arg_t sync_arg = {
.spa = spa,
.ll = ll,
.key = dle->dle_mintxg,
.to_free = &to_free
};
zfs_dbgmsg("deleting sublist (id %llu) from"
" livelist %llu, %lld remaining",
(u_longlong_t)dle->dle_bpobj.bpo_object,
(u_longlong_t)ll_obj, (longlong_t)count - 1);
VERIFY0(dsl_sync_task(spa_name(spa), NULL,
sublist_delete_sync, &sync_arg, 0,
ZFS_SPACE_CHECK_DESTROY));
} else {
VERIFY3U(err, ==, EINTR);
}
bplist_clear(&to_free);
bplist_destroy(&to_free);
dsl_deadlist_close(ll);
kmem_free(ll, sizeof (dsl_deadlist_t));
} else {
livelist_delete_arg_t sync_arg = {
.spa = spa,
.ll_obj = ll_obj,
.zap_obj = zap_obj
};
zfs_dbgmsg("deletion of livelist %llu completed",
(u_longlong_t)ll_obj);
VERIFY0(dsl_sync_task(spa_name(spa), NULL, livelist_delete_sync,
&sync_arg, 0, ZFS_SPACE_CHECK_DESTROY));
}
}
static void
spa_start_livelist_destroy_thread(spa_t *spa)
{
ASSERT3P(spa->spa_livelist_delete_zthr, ==, NULL);
spa->spa_livelist_delete_zthr =
zthr_create("z_livelist_destroy",
spa_livelist_delete_cb_check, spa_livelist_delete_cb, spa,
minclsyspri);
}
typedef struct livelist_new_arg {
bplist_t *allocs;
bplist_t *frees;
} livelist_new_arg_t;
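/*
 * Callback used while re-scanning a condensing livelist's bpobjs: route
 * each newly appended blkptr into the frees or allocs bplist depending
 * on whether it was freed or allocated.
 */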
static int
livelist_track_new_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(tx == NULL);
livelist_new_arg_t *lna = arg;
if (bp_freed) {
bplist_append(lna->frees, bp);
} else {
bplist_append(lna->allocs, bp);
zfs_livelist_condense_new_alloc++;
}
return (0);
}
typedef struct livelist_condense_arg {
spa_t *spa;
bplist_t to_keep;
uint64_t first_size;
uint64_t next_size;
} livelist_condense_arg_t;
static void
spa_livelist_condense_sync(void *arg, dmu_tx_t *tx)
{
livelist_condense_arg_t *lca = arg;
spa_t *spa = lca->spa;
bplist_t new_frees;
dsl_dataset_t *ds = spa->spa_to_condense.ds;
/* Have we been cancelled? */
if (spa->spa_to_condense.cancelled) {
zfs_livelist_condense_sync_cancel++;
goto out;
}
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
dsl_deadlist_t *ll = &ds->ds_dir->dd_livelist;
/*
* It's possible that the livelist was changed while the zthr was
* running. Therefore, we need to check for new blkptrs in the two
* entries being condensed and continue to track them in the livelist.
* Because of the way we handle remapped blkptrs (see dbuf_remap_impl),
* it's possible that the newly added blkptrs are FREEs or ALLOCs so
* we need to sort them into two different bplists.
*/
uint64_t first_obj = first->dle_bpobj.bpo_object;
uint64_t next_obj = next->dle_bpobj.bpo_object;
uint64_t cur_first_size = first->dle_bpobj.bpo_phys->bpo_num_blkptrs;
uint64_t cur_next_size = next->dle_bpobj.bpo_phys->bpo_num_blkptrs;
bplist_create(&new_frees);
livelist_new_arg_t new_bps = {
.allocs = &lca->to_keep,
.frees = &new_frees,
};
if (cur_first_size > lca->first_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&first->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->first_size));
}
if (cur_next_size > lca->next_size) {
VERIFY0(livelist_bpobj_iterate_from_nofree(&next->dle_bpobj,
livelist_track_new_cb, &new_bps, lca->next_size));
}
dsl_deadlist_clear_entry(first, ll, tx);
ASSERT(bpobj_is_empty(&first->dle_bpobj));
dsl_deadlist_remove_entry(ll, next->dle_mintxg, tx);
bplist_iterate(&lca->to_keep, dsl_deadlist_insert_alloc_cb, ll, tx);
bplist_iterate(&new_frees, dsl_deadlist_insert_free_cb, ll, tx);
bplist_destroy(&new_frees);
char dsname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("txg %llu condensing livelist of %s (id %llu), bpobj %llu "
"(%llu blkptrs) and bpobj %llu (%llu blkptrs) -> bpobj %llu "
"(%llu blkptrs)", (u_longlong_t)tx->tx_txg, dsname,
(u_longlong_t)ds->ds_object, (u_longlong_t)first_obj,
(u_longlong_t)cur_first_size, (u_longlong_t)next_obj,
(u_longlong_t)cur_next_size,
(u_longlong_t)first->dle_bpobj.bpo_object,
(u_longlong_t)first->dle_bpobj.bpo_phys->bpo_num_blkptrs);
out:
dmu_buf_rele(ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
spa->spa_to_condense.syncing = B_FALSE;
}
static void
spa_livelist_condense_cb(void *arg, zthr_t *t)
{
while (zfs_livelist_condense_zthr_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
spa_t *spa = arg;
dsl_deadlist_entry_t *first = spa->spa_to_condense.first;
dsl_deadlist_entry_t *next = spa->spa_to_condense.next;
uint64_t first_size, next_size;
livelist_condense_arg_t *lca =
kmem_alloc(sizeof (livelist_condense_arg_t), KM_SLEEP);
bplist_create(&lca->to_keep);
/*
* Process the livelists (matching FREEs and ALLOCs) in open context
* so we have minimal work in syncing context to condense.
*
* We save bpobj sizes (first_size and next_size) to use later in
* syncing context to determine if entries were added to these sublists
* while in open context. This is possible because the clone is still
* active and open for normal writes and we want to make sure the new,
* unprocessed blockpointers are inserted into the livelist normally.
*
* Note that dsl_process_sub_livelist() both stores the number of
* block pointers and iterates over them while the bpobj's lock is held,
* so the sizes returned to us are consistent with what was actually
* processed.
*/
int err = dsl_process_sub_livelist(&first->dle_bpobj, &lca->to_keep, t,
&first_size);
if (err == 0)
err = dsl_process_sub_livelist(&next->dle_bpobj, &lca->to_keep,
t, &next_size);
if (err == 0) {
while (zfs_livelist_condense_sync_pause &&
!(zthr_has_waiters(t) || zthr_iscancelled(t)))
delay(1);
dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
dmu_tx_mark_netfree(tx);
dmu_tx_hold_space(tx, 1);
err = dmu_tx_assign(tx, TXG_NOWAIT | TXG_NOTHROTTLE);
if (err == 0) {
/*
* Prevent the condense zthr restarting before
* the synctask completes.
*/
spa->spa_to_condense.syncing = B_TRUE;
lca->spa = spa;
lca->first_size = first_size;
lca->next_size = next_size;
dsl_sync_task_nowait(spa_get_dsl(spa),
spa_livelist_condense_sync, lca, tx);
dmu_tx_commit(tx);
return;
}
}
/*
* Condensing cannot continue: either it was externally stopped or
* we were unable to assign the tx to a txg because the pool has run out of
* space. In the second case, we'll just end up trying to condense
* again in a later txg.
*/
ASSERT(err != 0);
bplist_clear(&lca->to_keep);
bplist_destroy(&lca->to_keep);
kmem_free(lca, sizeof (livelist_condense_arg_t));
dmu_buf_rele(spa->spa_to_condense.ds->ds_dbuf, spa);
spa->spa_to_condense.ds = NULL;
if (err == EINTR)
zfs_livelist_condense_zthr_cancel++;
}
/* ARGSUSED */
/*
* Check that there is something to condense but that a condense is not
* already in progress and that condensing has not been cancelled.
*/
static boolean_t
spa_livelist_condense_cb_check(void *arg, zthr_t *z)
{
spa_t *spa = arg;
if ((spa->spa_to_condense.ds != NULL) &&
(spa->spa_to_condense.syncing == B_FALSE) &&
(spa->spa_to_condense.cancelled == B_FALSE)) {
return (B_TRUE);
}
return (B_FALSE);
}
static void
spa_start_livelist_condensing_thread(spa_t *spa)
{
spa->spa_to_condense.ds = NULL;
spa->spa_to_condense.first = NULL;
spa->spa_to_condense.next = NULL;
spa->spa_to_condense.syncing = B_FALSE;
spa->spa_to_condense.cancelled = B_FALSE;
ASSERT3P(spa->spa_livelist_condense_zthr, ==, NULL);
spa->spa_livelist_condense_zthr =
zthr_create("z_livelist_condense",
spa_livelist_condense_cb_check,
spa_livelist_condense_cb, spa, minclsyspri);
}
static void
spa_spawn_aux_threads(spa_t *spa)
{
ASSERT(spa_writeable(spa));
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_start_indirect_condensing_thread(spa);
spa_start_livelist_destroy_thread(spa);
spa_start_livelist_condensing_thread(spa);
ASSERT3P(spa->spa_checkpoint_discard_zthr, ==, NULL);
spa->spa_checkpoint_discard_zthr =
zthr_create("z_checkpoint_discard",
spa_checkpoint_discard_thread_check,
spa_checkpoint_discard_thread, spa, minclsyspri);
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
* then we call vdev_split() on each disk, and complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type)
{
char *ereport = FM_EREPORT_ZFS_POOL;
int error;
spa->spa_load_state = state;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, type, &ereport);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
(void) zfs_ereport_post(ereport, spa,
NULL, NULL, NULL, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
(void) spa_import_progress_set_state(spa_guid(spa),
spa_load_state(spa));
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
uint16_t tryconfig_mmp_seq = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
(void) nvlist_lookup_uint16(nvinfo, ZPOOL_CONFIG_MMP_SEQ,
&tryconfig_mmp_seq);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Disable the MMP activity check. This is used by zdb, which
* is intended to be used on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_ values are nonzero, they are the results of an
* earlier tryimport. If they all match the uberblock we just found,
* then the pool has not changed and we return false so we do not test
* a second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp &&
tryconfig_mmp_seq && tryconfig_mmp_seq ==
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0))
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host that last imported it. Since the hostid from the
* configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid(spa))
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* Nanoseconds the activity check must watch for changes on-disk.
*/
static uint64_t
spa_activity_check_duration(spa_t *spa, uberblock_t *ub)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t multihost_interval = MSEC2NSEC(
MMP_INTERVAL_OK(zfs_multihost_interval));
uint64_t import_delay = MAX(NANOSEC, import_intervals *
multihost_interval);
/*
* Local tunables determine a minimum duration except for the case
* where we know when the remote host will suspend the pool if MMP
* writes do not land.
*
* See Big Theory comment at the top of mmp.c for the reasoning behind
* these cases and times.
*/
ASSERT(MMP_IMPORT_SAFETY_FACTOR >= 100);
if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) > 0) {
/* MMP on remote host will suspend pool after failed writes */
import_delay = MMP_FAIL_INT(ub) * MSEC2NSEC(MMP_INTERVAL(ub)) *
MMP_IMPORT_SAFETY_FACTOR / 100;
zfs_dbgmsg("fail_intvals>0 import_delay=%llu ub_mmp "
"mmp_fails=%llu ub_mmp mmp_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_FAIL_INT(ub),
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)import_intervals);
} else if (MMP_INTERVAL_VALID(ub) && MMP_FAIL_INT_VALID(ub) &&
MMP_FAIL_INT(ub) == 0) {
/* MMP on remote host will never suspend pool */
import_delay = MAX(import_delay, (MSEC2NSEC(MMP_INTERVAL(ub)) +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("fail_intvals=0 import_delay=%llu ub_mmp "
"mmp_interval=%llu ub_mmp_delay=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)MMP_INTERVAL(ub),
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals);
} else if (MMP_VALID(ub)) {
/*
* zfs-0.7 compatibility case
*/
import_delay = MAX(import_delay, (multihost_interval +
ub->ub_mmp_delay) * import_intervals);
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu "
"import_intervals=%llu leaves=%u",
(u_longlong_t)import_delay,
(u_longlong_t)ub->ub_mmp_delay,
(u_longlong_t)import_intervals,
vdev_count_leaves(spa));
} else {
/* Using local tunings is the only reasonable option */
zfs_dbgmsg("pool last imported on non-MMP aware "
"host using import_delay=%llu multihost_interval=%llu "
"import_intervals=%llu", (u_longlong_t)import_delay,
(u_longlong_t)multihost_interval,
(u_longlong_t)import_intervals);
}
return (import_delay);
}
/*
* Perform the import activity check. If the user canceled the import or
* we detected activity then fail.
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t mmp_config = ub->ub_mmp_config;
uint16_t mmp_seq = MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0;
uint64_t import_delay;
hrtime_t import_expire;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another host. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
import_delay = spa_activity_check_duration(spa, ub);
/* Add a small random factor in case of simultaneous imports (0-25%) */
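/*
 * For example, with an import_delay of 10 seconds, a draw of 125 from
 * random_in_range(250) adds 10s * 125 / 1000 = 1.25s, so concurrent
 * importers are unlikely to finish their checks at the same instant.
 */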
import_delay += import_delay * random_in_range(250) / 1000;
import_expire = gethrtime() + import_delay;
while (gethrtime() < import_expire) {
(void) spa_import_progress_set_mmp_check(spa_guid(spa),
NSEC2SEC(import_expire - gethrtime()));
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp ||
mmp_seq != (MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0)) {
zfs_dbgmsg("multihost activity detected "
"txg %llu ub_txg %llu "
"timestamp %llu ub_timestamp %llu "
"mmp_config %#llx ub_mmp_config %#llx",
(u_longlong_t)txg, (u_longlong_t)ub->ub_txg,
(u_longlong_t)timestamp,
(u_longlong_t)ub->ub_timestamp,
(u_longlong_t)mmp_config,
(u_longlong_t)ub->ub_mmp_config);
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
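/*
 * A return of -1 from cv_timedwait_sig() means the one-second wait
 * simply timed out; any other value indicates the wait was interrupted
 * (e.g. by a signal), so the activity check is aborted with EINTR.
 */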
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from configuration read from disk store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
static int
spa_verify_host(spa_t *spa, nvlist_t *mos_config)
{
uint64_t hostid;
char *hostname;
uint64_t myhostid = 0;
if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
hostname = fnvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME);
myhostid = zone_get_hostid(NULL);
if (hostid != 0 && myhostid != 0 && hostid != myhostid) {
cmn_err(CE_WARN, "pool '%s' could not be "
"loaded as it was last accessed by "
"another system (host: %s hostid: 0x%llx). "
"See: https://openzfs.github.io/openzfs-docs/msg/"
"ZFS-8000-EY",
spa_name(spa), hostname, (u_longlong_t)hostid);
spa_load_failed(spa, "hostid verification failed: pool "
"last accessed by host: %s (hostid: 0x%llx)",
hostname, (u_longlong_t)hostid);
return (SET_ERROR(EBADF));
}
}
return (0);
}
static int
spa_ld_parse_config(spa_t *spa, spa_import_type_t type)
{
int error = 0;
nvlist_t *nvtree, *nvl, *config = spa->spa_config;
int parse;
vdev_t *rvd;
uint64_t pool_guid;
char *comment;
char *compatibility;
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_POOL_GUID);
return (SET_ERROR(EINVAL));
}
/*
* If we are doing an import, ensure that the pool is not already
* imported by checking if its pool guid already exists in the
* spa namespace.
*
* The only case that we allow an already imported pool to be
* imported again, is when the pool is checkpointed and we want to
* look at its checkpointed state from userland tools like zdb.
*/
#ifdef _KERNEL
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
#else
if ((spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0) &&
!spa_importing_readonly_checkpoint(spa)) {
#endif
spa_load_failed(spa, "a pool with guid %llu is already open",
(u_longlong_t)pool_guid);
return (SET_ERROR(EEXIST));
}
spa->spa_config_guid = pool_guid;
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
ASSERT(spa->spa_compatibility == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMPATIBILITY,
&compatibility) == 0)
spa->spa_compatibility = spa_strdup(compatibility);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) == 0)
spa->spa_config_splitting = fnvlist_dup(nvl);
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvtree)) {
spa_load_failed(spa, "invalid config provided: '%s' missing",
ZPOOL_CONFIG_VDEV_TREE);
return (SET_ERROR(EINVAL));
}
/*
* Create "The Godfather" zio to hold all async IOs
*/
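/*
 * One root zio is kept per CPU; async I/Os are parented to these
 * roots, which spreads the children across several parents rather
 * than funnelling them all through a single zio.
 */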
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
error = spa_config_parse(spa, &rvd, nvtree, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "unable to parse config [error=%d]",
error);
return (error);
}
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
return (0);
}
/*
* Recursively open all vdevs in the vdev tree. This function is called twice:
* first with the untrusted config, then with the trusted config.
*/
static int
spa_ld_open_vdevs(spa_t *spa)
{
int error = 0;
/*
* spa_missing_tvds_allowed defines how many top-level vdevs can be
* missing/unopenable for the root vdev to still be considered openable.
*/
if (spa->spa_trust_config) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_CACHEFILE) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_cachefile;
} else if (spa->spa_config_source == SPA_CONFIG_SRC_SCAN) {
spa->spa_missing_tvds_allowed = zfs_max_missing_tvds_scan;
} else {
spa->spa_missing_tvds_allowed = 0;
}
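/*
 * In short: a trusted (MOS) config may tolerate up to
 * zfs_max_missing_tvds missing top-level vdevs, a cachefile or scanned
 * config uses its own tunable, and any other source tolerates none.
 */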
spa->spa_missing_tvds_allowed =
MAX(zfs_max_missing_tvds, spa->spa_missing_tvds_allowed);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "vdev tree has %lld missing top-level "
"vdevs.", (u_longlong_t)spa->spa_missing_tvds);
if (spa->spa_trust_config && (spa->spa_mode & SPA_MODE_WRITE)) {
/*
* Although theoretically we could allow users to open
* incomplete pools in RW mode, we'd need to add a lot
* of extra logic (e.g. adjust pool space to account
* for missing vdevs).
* This limitation also prevents users from accidentally
* opening the pool in RW mode during data recovery and
* damaging it further.
*/
spa_load_note(spa, "pools with missing top-level "
"vdevs can only be opened in read-only mode.");
error = SET_ERROR(ENXIO);
} else {
spa_load_note(spa, "current settings allow for maximum "
"%lld missing top-level vdevs at this stage.",
(u_longlong_t)spa->spa_missing_tvds_allowed);
}
}
if (error != 0) {
spa_load_failed(spa, "unable to open vdev tree [error=%d]",
error);
}
if (spa->spa_missing_tvds != 0 || error != 0)
vdev_dbgmsg_print_tree(spa->spa_root_vdev, 2);
return (error);
}
/*
* We need to validate the vdev labels against the configuration that
* we have in hand. This function is called twice: first with an untrusted
* config, then with a trusted config. The validation is more strict when the
* config is trusted.
*/
static int
spa_ld_validate_vdevs(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_validate(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "vdev_validate failed [error=%d]", error);
return (error);
}
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN) {
spa_load_failed(spa, "cannot open vdev tree after invalidating "
"some vdevs");
vdev_dbgmsg_print_tree(rvd, 2);
return (SET_ERROR(ENXIO));
}
return (0);
}
static void
spa_ld_select_uberblock_done(spa_t *spa, uberblock_t *ub)
{
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
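/*
 * spa_verify_min_txg bounds how far back spa_load_verify() traverses:
 * an extreme rewind re-verifies everything since TXG_INITIAL, while a
 * normal load only looks back past the deferred-free window
 * (TXG_DEFER_SIZE txgs).
 */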
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
}
static int
spa_ld_select_uberblock(spa_t *spa, spa_import_type_t type)
{
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *label;
uberblock_t *ub = &spa->spa_uberblock;
boolean_t activity_check = B_FALSE;
/*
* If we are opening the checkpointed state of the pool by
* rewinding to it, at this point we will have written the
* checkpointed uberblock to the vdev labels, so searching
* the labels will find the right uberblock. However, if
* we are opening the checkpointed state read-only, we have
* not modified the labels. Therefore, we must ignore the
* labels and continue using the spa_uberblock that was set
* by spa_ld_checkpoint_rewind.
*
* Note that it would be fine to ignore the labels when
* rewinding (opening writeable) as well. However, if we
* crash just after writing the labels, we will end up
* searching the labels. Doing so in the common case means
* that this code path gets exercised normally, rather than
* just in the edge case.
*/
if (ub->ub_checkpoint_txg != 0 &&
spa_importing_readonly_checkpoint(spa)) {
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
spa_load_failed(spa, "no valid uberblock found");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
if (spa->spa_load_max_txg != UINT64_MAX) {
(void) spa_import_progress_set_max_txg(spa_guid(spa),
(u_longlong_t)spa->spa_load_max_txg);
}
spa_load_note(spa, "using uberblock with txg=%llu",
(u_longlong_t)ub->ub_txg);
/*
* For pools which have the multihost property on, determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label,
spa->spa_config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid(spa) == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
int error = spa_activity_check(spa, ub, spa->spa_config);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
fnvlist_add_uint16(spa->spa_load_info,
ZPOOL_CONFIG_MMP_SEQ,
(MMP_SEQ_VALID(ub) ? MMP_SEQ(ub) : 0));
}
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
spa_load_failed(spa, "version %llu is not supported",
(u_longlong_t)ub->ub_version);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL) {
spa_load_failed(spa, "label config unavailable");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) != 0) {
nvlist_free(label);
spa_load_failed(spa, "invalid label: '%s' missing",
ZPOOL_CONFIG_FEATURES_FOR_READ);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand then we
* cannot open a pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
0);
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
VERIFY(nvlist_add_string(unsup_feat,
nvpair_name(nvp), "") == 0);
}
}
if (!nvlist_empty(unsup_feat)) {
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
nvlist_free(unsup_feat);
spa_load_failed(spa, "some features are unsupported");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, spa->spa_config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa_ld_select_uberblock_done(spa, ub);
return (0);
}
static int
spa_ld_open_rootbp(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error != 0) {
spa_load_failed(spa, "unable to open rootbp in dsl_pool_init "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
return (0);
}
static int
spa_ld_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t reloading)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv, *mos_config, *policy;
int error = 0, copy_error;
uint64_t healthy_tvds, healthy_tvds_mos;
uint64_t mos_config_txg;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object, B_TRUE)
!= 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* If we're assembling a pool from a split, the config provided is
* already trusted so there is nothing to do.
*/
if (type == SPA_IMPORT_ASSEMBLE)
return (0);
healthy_tvds = spa_healthy_core_tvds(spa);
if (load_nvlist(spa, spa->spa_config_object, &mos_config)
!= 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* If we are doing an open, the pool owner wasn't verified yet, so do
* the verification here.
*/
if (spa->spa_load_state == SPA_LOAD_OPEN) {
error = spa_verify_host(spa, mos_config);
if (error != 0) {
nvlist_free(mos_config);
return (error);
}
}
nv = fnvlist_lookup_nvlist(mos_config, ZPOOL_CONFIG_VDEV_TREE);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Build a new vdev tree from the trusted config
*/
error = spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD);
if (error != 0) {
nvlist_free(mos_config);
spa_config_exit(spa, SCL_ALL, FTAG);
spa_load_failed(spa, "spa_config_parse failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Vdev paths in the MOS may be obsolete. If the untrusted config was
* obtained by scanning /dev/dsk, then it will have the right vdev
* paths. We update the trusted MOS config with this information.
* We first try to copy the paths with vdev_copy_path_strict, which
* succeeds only when both configs have exactly the same vdev tree.
* If that fails, we fall back to a more flexible method that has a
* best effort policy.
*/
copy_error = vdev_copy_path_strict(rvd, mrvd);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "provided vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
spa_load_note(spa, "MOS vdev tree:");
vdev_dbgmsg_print_tree(mrvd, 2);
}
if (copy_error != 0) {
spa_load_note(spa, "vdev_copy_path_strict failed, falling "
"back to vdev_copy_path_relaxed");
vdev_copy_path_relaxed(rvd, mrvd);
}
vdev_close(rvd);
vdev_free(rvd);
spa->spa_root_vdev = mrvd;
rvd = mrvd;
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* We will use spa_config if we decide to reload the spa or if spa_load
* fails and we rewind. We must thus regenerate the config using the
* MOS information with the updated paths. ZPOOL_LOAD_POLICY is used to
* pass settings on how to load the pool and is not stored in the MOS.
* We copy it over to our new, trusted config.
*/
mos_config_txg = fnvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_POOL_TXG);
nvlist_free(mos_config);
mos_config = spa_config_generate(spa, NULL, mos_config_txg, B_FALSE);
if (nvlist_lookup_nvlist(spa->spa_config, ZPOOL_LOAD_POLICY,
&policy) == 0)
fnvlist_add_nvlist(mos_config, ZPOOL_LOAD_POLICY, policy);
spa_config_set(spa, mos_config);
spa->spa_config_source = SPA_CONFIG_SRC_MOS;
/*
* Now that we got the config from the MOS, we should be more strict
* in checking blkptrs and can make assumptions about the consistency
* of the vdev tree. spa_trust_config must be set to true before opening
* vdevs in order for them to be writeable.
*/
spa->spa_trust_config = B_TRUE;
/*
* Open and validate the new vdev tree
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
if (copy_error != 0 || spa_load_print_vdev_tree) {
spa_load_note(spa, "final vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
}
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT &&
!spa->spa_extreme_rewind && zfs_max_missing_tvds == 0) {
/*
* Sanity check to make sure that we are indeed loading the
* latest uberblock. If we missed SPA_SYNC_MIN_VDEVS tvds
* in the config provided and they happened to be the only ones
* to have the latest uberblock, we could involuntarily perform
* an extreme rewind.
*/
healthy_tvds_mos = spa_healthy_core_tvds(spa);
if (healthy_tvds_mos - healthy_tvds >=
SPA_SYNC_MIN_VDEVS) {
spa_load_note(spa, "config provided misses too many "
"top-level vdevs compared to MOS (%lld vs %lld). ",
(u_longlong_t)healthy_tvds,
(u_longlong_t)healthy_tvds_mos);
spa_load_note(spa, "vdev tree:");
vdev_dbgmsg_print_tree(rvd, 2);
if (reloading) {
spa_load_failed(spa, "config was already "
"provided from MOS. Aborting.");
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_load_note(spa, "spa must be reloaded using MOS "
"config");
return (SET_ERROR(EAGAIN));
}
}
error = spa_check_for_missing_logs(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (rvd->vdev_guid_sum != spa->spa_uberblock.ub_guid_sum) {
spa_load_failed(spa, "uberblock guid sum doesn't match MOS "
"guid sum (%llu != %llu)",
(u_longlong_t)spa->spa_uberblock.ub_guid_sum,
(u_longlong_t)rvd->vdev_guid_sum);
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
ENXIO));
}
return (0);
}
static int
spa_ld_open_indirect_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* Everything that we read before spa_remove_init() must be stored
* on concrete vdevs. Therefore we do this as early as possible.
*/
error = spa_remove_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_remove_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Retrieve information needed to condense indirect vdev mappings.
*/
error = spa_condense_init(spa);
if (error != 0) {
spa_load_failed(spa, "spa_condense_init failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
return (0);
}
static int
spa_ld_check_features(spa_t *spa, boolean_t *missing_feat_writep)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj, B_TRUE) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) ||
spa->spa_load_state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
*missing_feat_writep = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (*missing_feat_writep &&
spa_writeable(spa))) {
spa_load_failed(spa, "pool uses unsupported features");
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
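/*
 * feature_get_refcount_from_disk() returns ENOTSUP when the feature
 * has no refcount object on disk, i.e. it has never been enabled, so
 * it is cached as SPA_FEATURE_DISABLED.
 */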
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
spa_load_failed(spa, "error getting refcount "
"for feature %s [error=%d]",
spa_feature_table[i].fi_guid, error);
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Encryption was added before bookmark_v2, even though bookmark_v2
* is now a dependency. If this pool has encryption enabled without
* bookmark_v2, trigger an errata message.
*/
if (spa_feature_is_enabled(spa, SPA_FEATURE_ENCRYPTION) &&
!spa_feature_is_enabled(spa, SPA_FEATURE_BOOKMARK_V2)) {
spa->spa_errata = ZPOOL_ERRATA_ZOL_8308_ENCRYPTION;
}
return (0);
}
static int
spa_ld_load_special_directories(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0) {
spa_load_failed(spa, "dsl_pool_open failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_get_props(spa_t *spa)
{
int error = 0;
uint64_t obj;
vdev_t *rvd = spa->spa_root_vdev;
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
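/*
 * ENOENT here simply means the pool predates the persisted checksum
 * salt, so a fresh one is generated below for subsequent use.
 */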
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
spa_load_failed(spa, "unable to retrieve checksum salt from "
"MOS [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj, B_TRUE) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0) {
spa_load_failed(spa, "error opening deferred-frees bpobj "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the livelist deletion field. If a livelist is queued for
* deletion, indicate that in the spa
*/
error = spa_dir_prop(spa, DMU_POOL_DELETED_CLONES,
&spa->spa_livelists_to_delete, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early / out of sync context. See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0) {
spa_load_failed(spa, "unable to retrieve MOS config");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps, B_FALSE);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object,
B_FALSE);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_AUTOTRIM, &spa->spa_autotrim);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If we are importing a pool with missing top-level vdevs,
* we enforce that the pool doesn't panic or get suspended on
* error since the likelihood of missing data is extremely high.
*/
if (spa->spa_missing_tvds > 0 &&
spa->spa_failmode != ZIO_FAILURE_MODE_CONTINUE &&
spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_load_note(spa, "forcing failmode to 'continue' "
"as some top level vdevs are missing");
spa->spa_failmode = ZIO_FAILURE_MODE_CONTINUE;
}
return (0);
}
static int
spa_ld_open_aux_vdevs(spa_t *spa, spa_import_type_t type)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object,
B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0) {
spa_load_failed(spa, "error loading spares nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object, B_FALSE);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0) {
spa_load_failed(spa, "error loading l2cache nvlist");
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
return (0);
}
static int
spa_ld_load_vdev_metadata(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid(spa) == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (spa->spa_load_state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev metadata such as metaslabs, DTLs, spacemap object, etc.
*/
error = vdev_load(rvd);
if (error != 0) {
spa_load_failed(spa, "vdev_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
error = spa_ld_log_spacemaps(spa);
if (error != 0) {
spa_load_failed(spa, "spa_ld_log_sm_data failed [error=%d]",
error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
}
/*
* Propagate the leaf DTLs we just loaded all the way up the vdev tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
return (0);
}
static int
spa_ld_load_dedup_tables(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
error = ddt_load(spa);
if (error != 0) {
spa_load_failed(spa, "ddt_load failed [error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
return (0);
}
static int
spa_ld_verify_logs(spa_t *spa, spa_import_type_t type, char **ereport)
{
vdev_t *rvd = spa->spa_root_vdev;
if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa)) {
boolean_t missing = spa_check_logs(spa);
if (missing) {
if (spa->spa_missing_tvds != 0) {
spa_load_note(spa, "spa_check_logs failed "
"so dropping the logs");
} else {
*ereport = FM_EREPORT_ZFS_LOG_REPLAY;
spa_load_failed(spa, "spa_check_logs failed");
return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG,
ENXIO));
}
}
}
return (0);
}
static int
spa_ld_verify_pool_data(spa_t *spa)
{
int error = 0;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (spa->spa_load_state != SPA_LOAD_TRYIMPORT) {
error = spa_load_verify(spa);
if (error != 0) {
spa_load_failed(spa, "spa_load_verify failed "
"[error=%d]", error);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
}
return (0);
}
static void
spa_ld_claim_log_blocks(spa_t *spa)
{
dmu_tx_t *tx;
dsl_pool_t *dp = spa_get_dsl(spa);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* Price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
}
static void
spa_ld_check_for_config_update(spa_t *spa, uint64_t config_cache_txg,
boolean_t update_config_cache)
{
vdev_t *rvd = spa->spa_root_vdev;
int need_update = B_FALSE;
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (update_config_cache || config_cache_txg != spa->spa_config_txg ||
spa->spa_load_state == SPA_LOAD_IMPORT ||
spa->spa_load_state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
static void
spa_ld_prepare_for_reload(spa_t *spa)
{
spa_mode_t mode = spa->spa_mode;
int async_suspended = spa->spa_async_suspended;
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, mode);
/*
* We save the value of spa_async_suspended as it gets reset to 0 by
* spa_unload(). We want to restore it to its original value before
* returning, as we might call spa_async_resume() later.
*/
spa->spa_async_suspended = async_suspended;
}
static int
spa_ld_read_checkpoint_txg(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT0(spa->spa_checkpoint_txg);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error == ENOENT)
return (0);
if (error != 0)
return (error);
ASSERT3U(checkpoint.ub_txg, !=, 0);
ASSERT3U(checkpoint.ub_checkpoint_txg, !=, 0);
ASSERT3U(checkpoint.ub_timestamp, !=, 0);
spa->spa_checkpoint_txg = checkpoint.ub_txg;
spa->spa_checkpoint_info.sci_timestamp = checkpoint.ub_timestamp;
return (0);
}
static int
spa_ld_mos_init(spa_t *spa, spa_import_type_t type)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
/*
* Never trust the config that is provided unless we are assembling
* a pool following a split.
* This means don't trust blkptrs and the vdev tree in general. This
* also effectively puts the spa in read-only mode since
* spa_writeable() checks for spa_trust_config to be true.
* We will later load a trusted config from the MOS.
*/
if (type != SPA_IMPORT_ASSEMBLE)
spa->spa_trust_config = B_FALSE;
/*
* Parse the config provided to create a vdev tree.
*/
error = spa_ld_parse_config(spa, type);
if (error != 0)
return (error);
spa_import_progress_add(spa);
/*
* Now that we have the vdev tree, try to open each vdev. This involves
* opening the underlying physical device, retrieving its geometry and
* probing the vdev with a dummy I/O. The state of each vdev will be set
* based on the success of those operations. After this we'll be ready
* to read from the vdevs.
*/
error = spa_ld_open_vdevs(spa);
if (error != 0)
return (error);
/*
* Read the label of each vdev and make sure that the GUIDs stored
* there match the GUIDs in the config provided.
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
error = spa_ld_validate_vdevs(spa);
if (error != 0)
return (error);
}
/*
* Read all vdev labels to find the best uberblock (i.e. latest,
* unless spa_load_max_txg is set) and store it in spa_uberblock. We
* get the list of features required to read blkptrs in the MOS from
* the vdev label with the best uberblock and verify that our version
* of zfs supports them all.
*/
error = spa_ld_select_uberblock(spa, type);
if (error != 0)
return (error);
/*
* Pass that uberblock to the dsl_pool layer which will open the root
* blkptr. This blkptr points to the latest version of the MOS and will
* allow us to read its contents.
*/
error = spa_ld_open_rootbp(spa);
if (error != 0)
return (error);
return (0);
}
static int
spa_ld_checkpoint_rewind(spa_t *spa)
{
uberblock_t checkpoint;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ZPOOL_CHECKPOINT, sizeof (uint64_t),
sizeof (uberblock_t) / sizeof (uint64_t), &checkpoint);
if (error != 0) {
spa_load_failed(spa, "unable to retrieve checkpointed "
"uberblock from the MOS config [error=%d]", error);
if (error == ENOENT)
error = ZFS_ERR_NO_CHECKPOINT;
return (error);
}
ASSERT3U(checkpoint.ub_txg, <, spa->spa_uberblock.ub_txg);
ASSERT3U(checkpoint.ub_txg, ==, checkpoint.ub_checkpoint_txg);
/*
* We need to update the txg and timestamp of the checkpointed
* uberblock to be higher than the latest one. This ensures that
* the checkpointed uberblock is selected if we were to close and
* reopen the pool right after we've written it in the vdev labels.
* (also see block comment in vdev_uberblock_compare)
*/
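/*
 * For example, if the labels currently hold an uberblock with txg 1000
 * and the checkpoint was taken at txg 900, the checkpointed uberblock
 * is rewritten below with txg 1001 so that vdev_uberblock_compare()
 * prefers it on the next open.
 */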
checkpoint.ub_txg = spa->spa_uberblock.ub_txg + 1;
checkpoint.ub_timestamp = gethrestime_sec();
/*
* Set current uberblock to be the checkpointed uberblock.
*/
spa->spa_uberblock = checkpoint;
/*
* If we are doing a normal rewind, then the pool is open for
* writing and we sync the "updated" checkpointed uberblock to
* disk. Once this is done, we've basically rewound the whole
* pool and there is no way back.
*
* There are cases when we don't want to attempt to sync the
* checkpointed uberblock to disk because we are opening a
* pool as read-only. Specifically, verifying the checkpointed
* state with zdb, and importing the checkpointed state to get
* a "preview" of its content.
*/
if (spa_writeable(spa)) {
vdev_t *rvd = spa->spa_root_vdev;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
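/*
 * Starting from a random child spreads the label writes across
 * different top-level vdevs on successive syncs; the loop collects up
 * to SPA_SYNC_MIN_VDEVS concrete, non-log vdevs with allocated
 * metaslabs.
 */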
for (int c = 0; c < children; c++) {
vdev_t *vd = rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, spa->spa_first_txg);
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_load_failed(spa, "failed to write checkpointed "
"uberblock to the vdev labels [error=%d]", error);
return (error);
}
}
return (0);
}
static int
spa_ld_mos_with_trusted_config(spa_t *spa, spa_import_type_t type,
boolean_t *update_config_cache)
{
int error;
/*
* Parse the config for pool, open and validate vdevs,
* select an uberblock, and use that uberblock to open
* the MOS.
*/
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
/*
* Retrieve the trusted config stored in the MOS and use it to create
* a new, exact version of the vdev tree, then reopen all vdevs.
*/
error = spa_ld_trusted_config(spa, type, B_FALSE);
if (error == EAGAIN) {
if (update_config_cache != NULL)
*update_config_cache = B_TRUE;
/*
* Redo the loading process with the trusted config if it is
* too different from the untrusted config.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "RELOADING");
error = spa_ld_mos_init(spa, type);
if (error != 0)
return (error);
error = spa_ld_trusted_config(spa, type, B_TRUE);
if (error != 0)
return (error);
} else if (error != 0) {
return (error);
}
return (0);
}
/*
* Load an existing storage pool, using the config provided. This config
* describes which vdevs are part of the pool and is later validated against
* partial configs present in each vdev's label and an entire copy of the
* config stored in the MOS.
*/
static int
spa_load_impl(spa_t *spa, spa_import_type_t type, char **ereport)
{
int error = 0;
boolean_t missing_feat_write = B_FALSE;
boolean_t checkpoint_rewind =
(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
boolean_t update_config_cache = B_FALSE;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_config_source != SPA_CONFIG_SRC_NONE);
spa_load_note(spa, "LOADING");
error = spa_ld_mos_with_trusted_config(spa, type, &update_config_cache);
if (error != 0)
return (error);
/*
* If we are rewinding to the checkpoint then we need to repeat
* everything we've done so far in this function but this time
* selecting the checkpointed uberblock and using that to open
* the MOS.
*/
if (checkpoint_rewind) {
/*
* If we are rewinding to the checkpoint update config cache
* anyway.
*/
update_config_cache = B_TRUE;
/*
* Extract the checkpointed uberblock from the current MOS
* and use this as the pool's uberblock from now on. If the
* pool is imported as writeable we also write the checkpoint
* uberblock to the labels, making the rewind permanent.
*/
error = spa_ld_checkpoint_rewind(spa);
if (error != 0)
return (error);
/*
* Redo the loading process again with the
* checkpointed uberblock.
*/
spa_ld_prepare_for_reload(spa);
spa_load_note(spa, "LOADING checkpointed uberblock");
error = spa_ld_mos_with_trusted_config(spa, type, NULL);
if (error != 0)
return (error);
}
/*
* Retrieve the checkpoint txg if the pool has a checkpoint.
*/
error = spa_ld_read_checkpoint_txg(spa);
if (error != 0)
return (error);
/*
* Retrieve the mapping of indirect vdevs. Those vdevs were removed
* from the pool and their contents were re-mapped to other vdevs. Note
* that everything that we read before this step must have been
* rewritten on concrete vdevs after the last device removal was
* initiated. Otherwise we could be reading from indirect vdevs before
* we have loaded their mappings.
*/
error = spa_ld_open_indirect_vdev_metadata(spa);
if (error != 0)
return (error);
/*
* Retrieve the full list of active features from the MOS and check if
* they are all supported.
*/
error = spa_ld_check_features(spa, &missing_feat_write);
if (error != 0)
return (error);
/*
* Load several special directories from the MOS needed by the dsl_pool
* layer.
*/
error = spa_ld_load_special_directories(spa);
if (error != 0)
return (error);
/*
* Retrieve pool properties from the MOS.
*/
error = spa_ld_get_props(spa);
if (error != 0)
return (error);
/*
* Retrieve the list of auxiliary devices - cache devices and spares -
* and open them.
*/
error = spa_ld_open_aux_vdevs(spa, type);
if (error != 0)
return (error);
/*
* Load the metadata for all vdevs. Also check if unopenable devices
* should be autoreplaced.
*/
error = spa_ld_load_vdev_metadata(spa);
if (error != 0)
return (error);
error = spa_ld_load_dedup_tables(spa);
if (error != 0)
return (error);
/*
* Verify the logs now to make sure we don't have any unexpected errors
* when we claim log blocks later.
*/
error = spa_ld_verify_logs(spa, type, ereport);
if (error != 0)
return (error);
if (missing_feat_write) {
ASSERT(spa->spa_load_state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
return (spa_vdev_err(spa->spa_root_vdev, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Traverse the last txgs to make sure the pool was left off in a safe
* state. When performing an extreme rewind, we verify the whole pool,
* which can take a very long time.
*/
error = spa_ld_verify_pool_data(spa);
if (error != 0)
return (error);
/*
* Calculate the deflated space for the pool. This must be done before
* we write anything to the pool because we'd need to update the space
* accounting using the deflated sizes.
*/
spa_update_dspace(spa);
/*
* We have now retrieved all the information we needed to open the
* pool. If we are importing the pool in read-write mode, a few
* additional steps must be performed to finish the import.
*/
if (spa_writeable(spa) && (spa->spa_load_state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
uint64_t config_cache_txg = spa->spa_config_txg;
ASSERT(spa->spa_load_state != SPA_LOAD_TRYIMPORT);
/*
* In case of a checkpoint rewind, log the original txg
* of the checkpointed uberblock.
*/
if (checkpoint_rewind) {
spa_history_log_internal(spa, "checkpoint rewind",
NULL, "rewound state to txg=%llu",
(u_longlong_t)spa->spa_uberblock.ub_checkpoint_txg);
}
/*
* Traverse the ZIL and claim all blocks.
*/
spa_ld_claim_log_blocks(spa);
/*
* Kick-off the syncing thread.
*/
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by ZIL traversal operations
* performed above.
*/
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* Check if we need to request an update of the config. On the
* next sync, we would update the config stored in vdev labels
* and the cachefile (by default /etc/zfs/zpool.cache).
*/
spa_ld_check_for_config_update(spa, config_cache_txg,
update_config_cache);
/*
* Check if a rebuild was in progress and if so resume it.
* Then check all DTLs to see if anything needs resilvering.
* The resilver will be deferred if a rebuild was started.
*/
if (vdev_rebuild_active(spa->spa_root_vdev)) {
vdev_rebuild_restart(spa);
} else if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER);
}
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
spa_restart_removal(spa);
spa_spawn_aux_threads(spa);
/*
* Delete any inconsistent datasets.
*
* Note:
* Since we may be issuing deletes for clones here,
* we make sure to do so after we've spawned all the
* auxiliary threads above (of which the livelist
* deletion zthr is one).
*/
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
vdev_trim_restart(spa->spa_root_vdev);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_import_progress_remove(spa_guid(spa));
spa_async_request(spa, SPA_ASYNC_L2CACHE_REBUILD);
spa_load_note(spa, "LOADED");
return (0);
}
static int
spa_load_retry(spa_t *spa, spa_load_state_t state)
{
spa_mode_t mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
spa_load_note(spa, "spa_load_retry: rewind, max txg: %llu",
(u_longlong_t)spa->spa_load_max_txg);
return (spa_load(spa, state, SPA_IMPORT_EXISTING));
}
/*
* If spa_load() fails, this function will try loading prior txgs. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, uint64_t max_request,
int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING);
if (load_error == 0)
return (0);
if (load_error == ZFS_ERR_NO_CHECKPOINT) {
/*
* When attempting checkpoint-rewind on a pool with no
* checkpoint, we should not attempt to load uberblocks
* from previous txgs when spa_load fails.
*/
ASSERT(spa->spa_import_flags & ZFS_IMPORT_CHECKPOINT);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
* the acceptable rewind range, and we're still finding uberblocks
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
spa_import_progress_remove(spa_guid(spa));
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
spa_import_progress_remove(spa_guid(spa));
return (load_error);
}
}
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
* The stats information (gen/count/ustats) is used to gather vdev statistics at
* the same time we open the pool, without having to keep the spa_t around in
* some ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_load_policy_t policy;
firstopen = B_TRUE;
zpool_get_load_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
zfs_dbgmsg("spa_open_common: opening %s", pool);
error = spa_load_best(spa, state, policy.zlp_txg,
policy.zlp_rewind);
if (error == EBADF) {
/*
* If vdev_validate() returns failure (indicated by
* EBADF), it means that one of the vdevs reports that
* the pool has been exported or destroyed. If this is
* the case, the config cache is out of sync and we
* should remove the pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
VERIFY(nvlist_dup(spa->spa_config, config,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER) {
VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors_recursive(spa_name(spa));
*spapp = spa;
return (0);
}
int
spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
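/*
 * Illustrative usage sketch (editorial addition, not part of the original
 * source): callers pair spa_open() with spa_close() using the same tag,
 * just as spa_get_stats() below pairs spa_open_common() with spa_close().
 * The pool name "tank" is purely hypothetical.
 *
 *	spa_t *spa;
 *	int err = spa_open("tank", &spa, FTAG);
 *	if (err == 0) {
 *		... use the pool while holding the open reference ...
 *		spa_close(spa, FTAG);
 *	}
 */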
/*
* Lookup the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
* Add spare device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
if (nspares != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
/*
* Go through and find any spares which have since been
* repurposed as an active spare. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
VERIFY(nvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
VERIFY(nvlist_lookup_uint64_array(
spares[i], ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
if (nl2cache != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
== 0);
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t za;
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
}
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
VERIFY(nvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_get_errlog_size(spa)) == 0);
if (spa_suspended(spa)) {
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended) == 0);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
/*
* Validate that the auxiliary device array is well formed. We must have an
* array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
}
vdev_free(vd);
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs) == 0);
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
KM_SLEEP) == 0);
for (i = 0; i < ndevs; i++)
VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
KM_SLEEP) == 0);
VERIFY(nvlist_remove(sav->sav_config, config,
DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
config, newdevs, ndevs + oldndevs) == 0);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
devs, ndevs) == 0);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp, NULL));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, ndraid = 0;
boolean_t has_features;
boolean_t has_encryption;
boolean_t has_allocclass;
spa_feature_t feat;
char *feat_name;
char *poolname;
nvlist_t *nvl;
if (props == NULL ||
nvlist_lookup_string(props, "tname", &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
has_allocclass = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
if (feat == SPA_FEATURE_ALLOCATION_CLASSES)
has_allocclass = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (!has_allocclass && zfs_special_devs(nvroot, NULL)) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (ENOTSUP);
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
spa->spa_removing_phys.sr_state = DSS_NONE;
spa->spa_removing_phys.sr_removing_vdev = -1;
spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
spa->spa_indirect_vdevs_loaded = B_TRUE;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = vdev_draid_spare_create(nvroot, rvd, &ndraid, 0)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) == 0) {
/*
* Instantiate the metaslab groups (this will dirty the vdevs);
* we can no longer error exit past this point.
*/
for (int c = 0; error == 0 && c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_metaslab_set_size(vd);
vdev_expand(vd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
spa->spa_autotrim = zpool_prop_default_numeric(ZPOOL_PROP_AUTOTRIM);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
for (int i = 0; i < ndraid; i++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
dmu_tx_commit(tx);
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
spa_spawn_aux_threads(spa);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = zfs_refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_load_policy_t policy;
spa_mode_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = SPA_MODE_READ;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zfs_dbgmsg("spa_import: verbatim import of %s", pool);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
zpool_get_load_policy(config, &policy);
if (policy.zlp_rewind & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa->spa_config_source = SPA_CONFIG_SRC_TRYIMPORT;
if (state != SPA_LOAD_RECOVER) {
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
zfs_dbgmsg("spa_import: importing %s", pool);
} else {
zfs_dbgmsg("spa_import: importing %s, max_txg=%lld "
"(RECOVERY MODE)", pool, (longlong_t)policy.zlp_txg);
}
error = spa_load_best(spa, state, policy.zlp_txg, policy.zlp_rewind);
/*
* Propagate anything learned while loading the pool and pass it
* back to caller (i.e. rewind info, missing devices, etc).
*/
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it doesn't have any validity
* anymore, and conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
VERIFY(nvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
zvol_create_minors_recursive(pool);
return (0);
}
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
char *poolname, *cachefile;
spa_t *spa;
uint64_t state;
int error;
zpool_load_policy_t policy;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, SPA_MODE_READ);
/*
* Rewind pool if a max txg was provided.
*/
zpool_get_load_policy(spa->spa_config, &policy);
if (policy.zlp_txg != UINT64_MAX) {
spa->spa_load_max_txg = policy.zlp_txg;
spa->spa_extreme_rewind = B_TRUE;
zfs_dbgmsg("spa_tryimport: importing %s, max_txg=%lld",
poolname, (longlong_t)policy.zlp_txg);
} else {
zfs_dbgmsg("spa_tryimport: importing %s", poolname);
}
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_CACHEFILE, &cachefile)
== 0) {
zfs_dbgmsg("spa_tryimport: using cachefile '%s'", cachefile);
spa->spa_config_source = SPA_CONFIG_SRC_CACHEFILE;
} else {
spa->spa_config_source = SPA_CONFIG_SRC_SCAN;
}
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
poolname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
state) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp) == 0);
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata) == 0);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
VERIFY(nvlist_add_string(config,
ZPOOL_CONFIG_BOOTFS, dsname) == 0);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
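/*
 * Note (editorial addition, derived from spa_tryimport() above): a
 * try-import never leaves state behind. The pool is loaded read-only
 * under TRYIMPORT_NAME, a config nvlist is generated for the caller,
 * and the temporary spa_t is unloaded, deactivated, and removed from
 * the namespace before the config is returned.
 */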
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(const char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
int error;
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & SPA_MODE_WRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_is_exporting) {
/* the pool is being exported by another thread */
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ZFS_ERR_EXPORT_IN_PROGRESS));
}
spa->spa_is_exporting = B_TRUE;
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks,
* reacquire the namespace lock, and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
goto export_spa;
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) || (spa->spa_inject_ref != 0)) {
error = SET_ERROR(EBUSY);
goto fail;
}
if (spa->spa_sync_on) {
/*
* A pool cannot be exported if it has an active shared spare.
* This is to prevent other pools from stealing the active spare
* from an exported pool. If the user really wants to, such a pool
* can still be forcibly exported.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
error = SET_ERROR(EXDEV);
goto fail;
}
/*
* We're about to export or destroy this pool. Make sure
* we stop all initialization and trim activity here before
* we set the spa_final_txg. This will ensure that all
* dirty data resulting from the initialization is
* committed to disk before we unload the pool.
*/
if (spa->spa_root_vdev != NULL) {
vdev_t *rvd = spa->spa_root_vdev;
vdev_initialize_stop_all(rvd, VDEV_INITIALIZE_ACTIVE);
vdev_trim_stop_all(rvd, VDEV_TRIM_ACTIVE);
vdev_autotrim_stop_all(spa);
vdev_rebuild_stop_all(spa);
}
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
} else {
/*
* If spa_remove() is not called for this spa_t and
* there is any possibility that it can be reused,
* we make sure to reset the exporting flag.
*/
spa->spa_is_exporting = B_FALSE;
}
mutex_exit(&spa_namespace_lock);
return (0);
fail:
spa->spa_is_exporting = B_FALSE;
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(const char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(const char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
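/*
 * Summary (editorial addition, derived from the code above): the three
 * wrappers differ only in the new_state they pass to spa_export_common():
 *
 *	spa_destroy() -> POOL_STATE_DESTROYED (labels synced, config removed)
 *	spa_export()  -> POOL_STATE_EXPORTED  (honors force and hardforce)
 *	spa_reset()   -> POOL_STATE_UNINITIALIZED (unloads the spa_t without
 *	                 removing it from the namespace)
 */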
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* This is called as a synctask to increment the draid feature flag
*/
static void
spa_draid_feature_incr(void *arg, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
int draid = (int)(uintptr_t)arg;
for (int c = 0; c < draid; c++)
spa_feature_incr(spa, SPA_FEATURE_DRAID, tx);
}
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg, ndraid = 0;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0) {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* The virtual dRAID spares must be added after the vdev tree is created
* and the vdev guids are generated. The guid of their associated
* dRAID is stored in the config and used when opening the spare.
*/
if ((error = vdev_draid_spare_create(nvroot, vd, &ndraid,
rvd->vdev_children)) == 0) {
if (ndraid > 0 && nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) != 0)
nspares = 0;
} else {
return (spa_vdev_exit(spa, vd, txg, error));
}
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* If we are in the middle of a device removal, we can only add
* devices which match the existing devices in the pool.
* If we are in the middle of a removal, or have some indirect
* vdevs, we cannot add raidz or dRAID top levels.
*/
if (spa->spa_vdev_removal != NULL ||
spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
if (spa->spa_vdev_removal != NULL &&
tvd->vdev_ashift != spa->spa_max_ashift) {
return (spa_vdev_exit(spa, vd, txg, EINVAL));
}
/* Fail if top level vdev is raidz or a dRAID */
if (vdev_get_nparity(tvd) != 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
/*
* Need the top level mirror to be
* a mirror of leaf vdevs only
*/
if (tvd->vdev_ops == &vdev_mirror_ops) {
for (uint64_t cid = 0;
cid < tvd->vdev_children; cid++) {
vdev_t *cvd = tvd->vdev_child[cid];
if (!cvd->vdev_ops->vdev_op_leaf) {
return (spa_vdev_exit(spa, vd,
txg, EINVAL));
}
}
}
}
}
for (int c = 0; c < vd->vdev_children; c++) {
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = rvd->vdev_children;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We can't increment a feature while holding spa_vdev so we
* have to do it in a synctask.
*/
if (ndraid != 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
dsl_sync_task_nowait(spa->spa_dsl_pool, spa_draid_feature_incr,
(void *)(uintptr_t)ndraid, tx);
dmu_tx_commit(tx);
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
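/*
 * Shape note (editorial addition; names taken from the lookups in
 * spa_vdev_add() above): the nvroot argument is a vdev-tree nvlist whose
 * children become new top-level vdevs, optionally carrying
 * ZPOOL_CONFIG_SPARES and ZPOOL_CONFIG_L2CACHE arrays of leaf-vdev
 * nvlists. Those arrays are validated by spa_validate_aux() and merged
 * into the pool's aux lists by spa_set_aux_vdevs().
 */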
/*
* Attach a device to a mirror. The arguments are the path to any device
* in the mirror, and the nvroot for the new device. If the path specifies
* a device that is not mirrored, we automatically insert the mirror vdev.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*
* If 'rebuild' is specified, then sequential reconstruction (a.k.a. rebuild)
* should be performed instead of traditional healing reconstruction. From
* an administrator's perspective these are both resilver operations.
*/
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing,
int rebuild)
{
uint64_t txg, dtl_max_txg;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (rebuild) {
if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REBUILD))
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
if (dsl_scan_resilvering(spa_get_dsl(spa)))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_RESILVER_IN_PROGRESS));
} else {
if (vdev_rebuild_active(rvd))
return (spa_vdev_exit(spa, NULL, txg,
ZFS_ERR_REBUILD_IN_PROGRESS));
}
if (spa->spa_vdev_removal != NULL)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!oldvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = oldvd->vdev_parent;
if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH)) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* Spares can't replace logs
*/
if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* A dRAID spare can only replace a child of its parent dRAID vdev.
*/
if (newvd->vdev_ops == &vdev_draid_spare_ops &&
oldvd->vdev_top != vdev_draid_spare_get_parent(newvd)) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (rebuild) {
/*
* For rebuilds, the top vdev must support reconstruction
* using only space maps. This means the only allowable
* vdev types are the root vdev, a mirror, or dRAID.
*/
tvd = pvd;
if (pvd->vdev_top != NULL)
tvd = pvd->vdev_top;
if (tvd->vdev_ops != &vdev_mirror_ops &&
tvd->vdev_ops != &vdev_root_ops &&
tvd->vdev_ops != &vdev_draid_ops) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
}
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or the root
* vdev.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) snprintf(oldvd->vdev_path, strlen(newvd->vdev_path) + 5,
"%s/%s", newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
}
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
vdev_dtl_dirty(newvd, DTL_MISSING,
TXG_INITIAL, dtl_max_txg - TXG_INITIAL);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver or rebuild to restart in the future. We do
* this to ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
if (rebuild) {
newvd->vdev_rebuild_txg = txg;
vdev_rebuild(tvd);
} else {
newvd->vdev_resilver_txg = txg;
if (dsl_scan_resilvering(spa_get_dsl(spa)) &&
spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER)) {
vdev_defer_resilver(newvd);
} else {
dsl_scan_restart_resilver(spa->spa_dsl_pool,
dtl_max_txg);
}
}
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
vdev_t *rvd __maybe_unused = spa->spa_root_vdev;
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_detach_enter(spa, guid);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
/*
* Besides being called directly from the userland through the
* ioctl interface, spa_vdev_detach() can be potentially called
* at the end of spa_vdev_resilver_done().
*
* In the regular case, when we have a checkpoint this shouldn't
* happen as we never empty the DTLs of a vdev during the scrub
* [see comment in dsl_scan_done()]. Thus spa_vdev_resilver_done()
* should never get here when we have a checkpoint.
*
* That said, even in a case when we checkpoint the pool exactly
* as spa_vdev_resilver_done() calls this function everything
* should be fine as the resilver will return right away.
*/
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a normal spare, then it
* implies that the spare should become a real disk, and be removed
* from the active spare list for the pool. dRAID spares on the
* other hand are coupled to the pool and thus should never be removed
* from the spares list.
*/
if (pvd->vdev_ops == &vdev_spare_ops && vd->vdev_id == 0) {
vdev_t *last_cvd = pvd->vdev_child[pvd->vdev_children - 1];
if (last_cvd->vdev_isspare &&
last_cvd->vdev_ops != &vdev_draid_spare_ops) {
unspare = B_TRUE;
}
}
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
spa_notify_waiters(spa);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
static int
spa_vdev_initialize_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
}
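/* Serialize with any other initialize state changes on this vdev. */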
mutex_enter(&vd->vdev_initialize_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate an initialize action we check to see
* if the vdev_initialize_thread is NULL. We do this instead
* of using the vdev_initialize_state since there might be
* a previous initialization process which has completed but
* whose thread has not yet exited.
*/
if (cmd_type == POOL_INITIALIZE_START &&
(vd->vdev_initialize_thread != NULL ||
vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_INITIALIZE_CANCEL &&
(vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE &&
vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED)) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_INITIALIZE_SUSPEND &&
vd->vdev_initialize_state != VDEV_INITIALIZE_ACTIVE) {
mutex_exit(&vd->vdev_initialize_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_INITIALIZE_START:
vdev_initialize(vd);
break;
case POOL_INITIALIZE_CANCEL:
vdev_initialize_stop(vd, VDEV_INITIALIZE_CANCELED, vd_list);
break;
case POOL_INITIALIZE_SUSPEND:
vdev_initialize_stop(vd, VDEV_INITIALIZE_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_initialize_lock);
return (0);
}
int
spa_vdev_initialize(spa_t *spa, nvlist_t *nv, uint64_t cmd_type,
nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping initialization. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the initializing operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_initialize_impl(spa, vdev_guid, cmd_type,
&vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all initialize threads to stop. */
vdev_initialize_stop_wait(spa, &vd_list);
/* Sync out the initializing state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
static int
spa_vdev_trim_impl(spa_t *spa, uint64_t guid, uint64_t cmd_type,
uint64_t rate, boolean_t partial, boolean_t secure, list_t *vd_list)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
/* Look up vdev and ensure it's a leaf. */
vdev_t *vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || vd->vdev_detached) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(ENODEV));
} else if (!vd->vdev_ops->vdev_op_leaf || !vdev_is_concrete(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EINVAL));
} else if (!vdev_writeable(vd)) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EROFS));
} else if (!vd->vdev_has_trim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
} else if (secure && !vd->vdev_has_securetrim) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (SET_ERROR(EOPNOTSUPP));
}
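/* Serialize with any other TRIM state changes on this vdev. */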
mutex_enter(&vd->vdev_trim_lock);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
/*
* When we activate a TRIM action we check to see if the
* vdev_trim_thread is NULL. We do this instead of using the
* vdev_trim_state since there might be a previous TRIM process
* which has completed but whose thread has not yet exited.
*/
if (cmd_type == POOL_TRIM_START &&
(vd->vdev_trim_thread != NULL || vd->vdev_top->vdev_removing)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(EBUSY));
} else if (cmd_type == POOL_TRIM_CANCEL &&
(vd->vdev_trim_state != VDEV_TRIM_ACTIVE &&
vd->vdev_trim_state != VDEV_TRIM_SUSPENDED)) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
} else if (cmd_type == POOL_TRIM_SUSPEND &&
vd->vdev_trim_state != VDEV_TRIM_ACTIVE) {
mutex_exit(&vd->vdev_trim_lock);
return (SET_ERROR(ESRCH));
}
switch (cmd_type) {
case POOL_TRIM_START:
vdev_trim(vd, rate, partial, secure);
break;
case POOL_TRIM_CANCEL:
vdev_trim_stop(vd, VDEV_TRIM_CANCELED, vd_list);
break;
case POOL_TRIM_SUSPEND:
vdev_trim_stop(vd, VDEV_TRIM_SUSPENDED, vd_list);
break;
default:
panic("invalid cmd_type %llu", (unsigned long long)cmd_type);
}
mutex_exit(&vd->vdev_trim_lock);
return (0);
}
/*
* Initiates a manual TRIM for the requested vdevs. This kicks off individual
* TRIM threads for each child vdev. These threads pass over all of the free
* space in the vdev's metaslabs and issue TRIM commands for that space.
*/
int
spa_vdev_trim(spa_t *spa, nvlist_t *nv, uint64_t cmd_type, uint64_t rate,
boolean_t partial, boolean_t secure, nvlist_t *vdev_errlist)
{
int total_errors = 0;
list_t vd_list;
list_create(&vd_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
/*
* We hold the namespace lock through the whole function
* to prevent any changes to the pool while we're starting or
* stopping TRIM. The config and state locks are held so that
* we can properly assess the vdev state before we commit to
* the TRIM operation.
*/
mutex_enter(&spa_namespace_lock);
for (nvpair_t *pair = nvlist_next_nvpair(nv, NULL);
pair != NULL; pair = nvlist_next_nvpair(nv, pair)) {
uint64_t vdev_guid = fnvpair_value_uint64(pair);
int error = spa_vdev_trim_impl(spa, vdev_guid, cmd_type,
rate, partial, secure, &vd_list);
if (error != 0) {
char guid_as_str[MAXNAMELEN];
(void) snprintf(guid_as_str, sizeof (guid_as_str),
"%llu", (unsigned long long)vdev_guid);
fnvlist_add_int64(vdev_errlist, guid_as_str, error);
total_errors++;
}
}
/* Wait for all TRIM threads to stop. */
vdev_trim_stop_wait(spa, &vd_list);
/* Sync out the TRIM state */
txg_wait_synced(spa->spa_dsl_pool, 0);
mutex_exit(&spa_namespace_lock);
list_destroy(&vd_list);
return (total_errors);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT)) {
error = (spa_has_checkpoint(spa)) ?
ZFS_ERR_CHECKPOINT_EXISTS : ZFS_ERR_DISCARDING_CHECKPOINT;
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the holes & logs as children */
if (vd->vdev_islog || (vd->vdev_ops != &vdev_indirect_ops &&
!vdev_is_concrete(vd))) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
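/*
* The split config must supply one entry per top-level vdev, not
* counting any trailing log or hole vdevs.
*/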
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* deal with indirect vdevs */
if (spa->spa_root_vdev->vdev_child[c]->vdev_ops ==
&vdev_indirect_ops)
continue;
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
!vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c]) ||
vdev_resilver_needed(vml[c], NULL, NULL)) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift) == 0);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
glist, children) == 0);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
nvl) == 0);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
spa->spa_config_txg) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL)) == 0);
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/*
* Temporarily stop the initializing and TRIM activity. We set the
* state to ACTIVE so that we know to resume initializing or TRIM
* once the split has completed.
*/
list_t vd_initialize_list;
list_create(&vd_initialize_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_initialize_node));
list_t vd_trim_list;
list_create(&vd_trim_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_trim_node));
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
mutex_enter(&vml[c]->vdev_initialize_lock);
vdev_initialize_stop(vml[c],
VDEV_INITIALIZE_ACTIVE, &vd_initialize_list);
mutex_exit(&vml[c]->vdev_initialize_lock);
mutex_enter(&vml[c]->vdev_trim_lock);
vdev_trim_stop(vml[c], VDEV_TRIM_ACTIVE, &vd_trim_list);
mutex_exit(&vml[c]->vdev_trim_lock);
}
}
vdev_initialize_stop_wait(spa, &vd_initialize_list);
vdev_trim_stop_wait(spa, &vd_trim_list);
list_destroy(&vd_initialize_list);
list_destroy(&vd_trim_list);
newspa->spa_config_source = SPA_CONFIG_SRC_SPLIT;
newspa->spa_is_splitting = B_TRUE;
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
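/*
* This tx is only used to log a per-vdev 'detach' history record
* for each split child below.
*/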
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
for (c = 0; c < children; c++) {
if (vml[c] != NULL && vml[c]->vdev_ops != &vdev_indirect_ops) {
vdev_t *tvd = vml[c]->vdev_top;
/*
* Need to be sure the detachable VDEV is not
* on any *other* txg's DTL list to prevent it
* from being accessed after it's freed.
*/
for (int t = 0; t < TXG_SIZE; t++) {
(void) txg_list_remove_this(
&tvd->vdev_dtl_list, vml[c], t);
}
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
newspa->spa_is_splitting = B_FALSE;
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
/* restart initializing or trimming disks as necessary */
spa_async_request(spa, SPA_ASYNC_INITIALIZE_RESTART);
spa_async_request(spa, SPA_ASYNC_TRIM_RESTART);
spa_async_request(spa, SPA_ASYNC_AUTOTRIM_RESTART);
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
* Also potentially update faulted state.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
vdev_propagate_state(vd);
/*
* If there are more than two spares attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* If a detach was not performed above, replace waiters will not have
* been notified, in which case we must do so now.
*/
spa_notify_waiters(spa);
}
/*
* Update the stored path or FRU for this vdev.
*/
static int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
if (func == POOL_SCAN_RESILVER &&
!spa_feature_is_enabled(spa, SPA_FEATURE_RESILVER_DEFER))
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
return (dsl_scan(spa->spa_dsl_pool, func));
}
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
/* Tell userspace that the vdev is gone. */
zfs_post_remove(spa, vd);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
static void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
dsl_pool_t *dp = spa->spa_dsl_pool;
int tasks;
ASSERT(spa->spa_sync_on);
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
old_space += metaslab_class_get_space(spa_special_class(spa));
old_space += metaslab_class_get_space(spa_dedup_class(spa));
old_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
new_space += metaslab_class_get_space(spa_special_class(spa));
new_space += metaslab_class_get_space(spa_dedup_class(spa));
new_space += metaslab_class_get_space(
spa_embedded_log_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), (u_longlong_t)new_space,
(u_longlong_t)(new_space - old_space));
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be probed.
*/
if (tasks & SPA_ASYNC_PROBE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_probe(spa, spa->spa_root_vdev);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE ||
tasks & SPA_ASYNC_REBUILD_DONE) {
spa_vdev_resilver_done(spa);
}
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER &&
!vdev_rebuild_active(spa->spa_root_vdev) &&
(!dsl_scan_resilvering(dp) ||
!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_RESILVER_DEFER)))
dsl_scan_restart_resilver(dp, 0);
if (tasks & SPA_ASYNC_INITIALIZE_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_initialize_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_TRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_restart(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
if (tasks & SPA_ASYNC_AUTOTRIM_RESTART) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_autotrim_restart(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache whole device TRIM.
*/
if (tasks & SPA_ASYNC_L2CACHE_TRIM) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
vdev_trim_l2arc(spa);
spa_config_exit(spa, SCL_CONFIG, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Kick off L2 cache rebuilding.
*/
if (tasks & SPA_ASYNC_L2CACHE_REBUILD) {
mutex_enter(&spa_namespace_lock);
spa_config_enter(spa, SCL_L2ARC, FTAG, RW_READER);
l2arc_spa_rebuild_start(spa);
spa_config_exit(spa, SCL_L2ARC, FTAG);
mutex_exit(&spa_namespace_lock);
}
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
while (spa->spa_async_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
spa_vdev_remove_suspend(spa);
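/*
* Cancel the long-running zthr helpers as well; spa_async_resume()
* resumes them.
*/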
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_cancel(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_cancel(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_cancel(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_cancel(ll_condense_thread);
}
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
spa_restart_removal(spa);
zthr_t *condense_thread = spa->spa_condense_zthr;
if (condense_thread != NULL)
zthr_resume(condense_thread);
zthr_t *discard_thread = spa->spa_checkpoint_discard_zthr;
if (discard_thread != NULL)
zthr_resume(discard_thread);
zthr_t *ll_delete_thread = spa->spa_livelist_delete_zthr;
if (ll_delete_thread != NULL)
zthr_resume(ll_delete_thread);
zthr_t *ll_condense_thread = spa->spa_livelist_condense_zthr;
if (ll_condense_thread != NULL)
zthr_resume(ll_condense_thread);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
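/*
* Hold off the config update task while we are still within the
* retry interval following a failed config cache write.
*/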
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
int
spa_async_tasks(spa_t *spa)
{
return (spa->spa_async_tasks);
}
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, bp_freed, tx);
return (0);
}
int
bpobj_enqueue_alloc_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_FALSE, tx));
}
int
bpobj_enqueue_free_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
return (bpobj_enqueue_cb(arg, bp, B_TRUE, tx));
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *pio = arg;
zio_nowait(zio_free_sync(pio, pio->io_spa, dmu_tx_get_txg(tx), bp,
pio->io_flags));
return (0);
}
static int
bpobj_spa_free_sync_cb(void *arg, const blkptr_t *bp, boolean_t bp_freed,
dmu_tx_t *tx)
{
ASSERT(!bp_freed);
return (spa_free_sync_cb(arg, bp, tx));
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
/*
* Note:
* If the log space map feature is active, we stop deferring
* frees to the next TXG and therefore running this function
* would be considered a no-op as spa_deferred_bpobj should
* not have any entries.
*
* That said we run this function anyway (instead of returning
* immediately) for the edge-case scenario where we just
* activated the log space map feature in this TXG but we have
* deferred frees from the previous TXG.
*/
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
bpobj_spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
bzero(packed + nvsize, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
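/*
* Record the size of the packed nvlist in the object's bonus buffer
* so readers know how many bytes to unpack.
*/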
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (sav->sav_count == 0) {
VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
sav->sav_count) == 0);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za.za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t za;
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za.za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld",
(longlong_t)version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
char *strval, *fname;
zpool_prop_t prop;
const char *propname;
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
case ZPOOL_PROP_INVAL:
/*
* We checked this earlier in spa_prop_validate().
*/
ASSERT(zpool_prop_feature(nvpair_name(elem)));
fname = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", nvpair_name(elem));
break;
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
* 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. We also need to
* update the cache file to keep it in sync with the
* MOS version. It's unnecessary to do this for pool
* creation since the vdev's configuration has already
* been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
case ZPOOL_PROP_COMPATIBILITY:
strval = fnvpair_value_string(elem);
if (spa->spa_compatibility != NULL)
spa_strfree(spa->spa_compatibility);
spa->spa_compatibility = spa_strdup(strval);
/*
* Dirty the configuration on vdevs as above.
*/
if (tx->tx_txg != TXG_INITIAL) {
vdev_config_dirty(spa->spa_root_vdev);
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", nvpair_name(elem),
(longlong_t)intval);
} else {
ASSERT(0); /* not allowed */
}
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOTRIM:
spa->spa_autotrim = intval;
spa_async_request(spa,
SPA_ASYNC_AUTOTRIM_RESTART);
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
default:
break;
}
}
}
mutex_exit(&spa->spa_props_lock);
}
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
if (spa_sync_pass(spa) != 1)
return;
dsl_pool_t *dp = spa->spa_dsl_pool;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
/*
* LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
* when the possibility to use lz4 compression for metadata was added.
* Old pools that have this feature enabled must be upgraded to have
* this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
static void
vdev_indirect_state_sync_verify(vdev_t *vd)
{
vdev_indirect_mapping_t *vim __maybe_unused = vd->vdev_indirect_mapping;
vdev_indirect_births_t *vib __maybe_unused = vd->vdev_indirect_births;
if (vd->vdev_ops == &vdev_indirect_ops) {
ASSERT(vim != NULL);
ASSERT(vib != NULL);
}
uint64_t obsolete_sm_object = 0;
ASSERT0(vdev_obsolete_sm_object(vd, &obsolete_sm_object));
if (obsolete_sm_object != 0) {
ASSERT(vd->vdev_obsolete_sm != NULL);
ASSERT(vd->vdev_removing ||
vd->vdev_ops == &vdev_indirect_ops);
ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
ASSERT3U(obsolete_sm_object, ==,
space_map_object(vd->vdev_obsolete_sm));
ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
space_map_allocated(vd->vdev_obsolete_sm));
}
ASSERT(vd->vdev_obsolete_segments != NULL);
/*
* Since frees / remaps to an indirect vdev can only
* happen in syncing context, the obsolete segments
* tree must be empty when we start syncing.
*/
ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
}
/*
* Set the top-level vdev's max queue depth. Evaluate each top-level's
* async write queue depth in case it changed. The max queue depth will
* not change in the middle of syncing out this txg.
*/
static void
spa_sync_adjust_vdev_max_queue_depth(spa_t *spa)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
metaslab_class_t *normal = spa_normal_class(spa);
metaslab_class_t *special = spa_special_class(spa);
metaslab_class_t *dedup = spa_dedup_class(spa);
uint64_t slots_per_allocator = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || !metaslab_group_initialized(mg))
continue;
metaslab_class_t *mc = mg->mg_class;
if (mc != normal && mc != special && mc != dedup)
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
for (int i = 0; i < mg->mg_allocators; i++) {
ASSERT0(zfs_refcount_count(
&(mg->mg_allocator[i].mga_alloc_queue_depth)));
}
mg->mg_max_alloc_queue_depth = max_queue_depth;
for (int i = 0; i < mg->mg_allocators; i++) {
mg->mg_allocator[i].mga_cur_max_alloc_queue_depth =
zfs_vdev_def_queue_depth;
}
slots_per_allocator += zfs_vdev_def_queue_depth;
}
for (int i = 0; i < spa->spa_alloc_count; i++) {
ASSERT0(zfs_refcount_count(&normal->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&special->mc_allocator[i].
mca_alloc_slots));
ASSERT0(zfs_refcount_count(&dedup->mc_allocator[i].
mca_alloc_slots));
normal->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
special->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
dedup->mc_allocator[i].mca_alloc_max_slots =
slots_per_allocator;
}
normal->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
special->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
dedup->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
}
static void
spa_sync_condense_indirect(spa_t *spa, dmu_tx_t *tx)
{
ASSERT(spa_writeable(spa));
vdev_t *rvd = spa->spa_root_vdev;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
vdev_indirect_state_sync_verify(vd);
if (vdev_indirect_should_condense(vd)) {
spa_condense_indirect_start_sync(vd, tx);
break;
}
}
}
static void
spa_sync_iterate_to_convergence(spa_t *spa, dmu_tx_t *tx)
{
objset_t *mos = spa->spa_meta_objset;
dsl_pool_t *dp = spa->spa_dsl_pool;
uint64_t txg = tx->tx_txg;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
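/*
* Iterate until the MOS stops being dirtied; each iteration of this
* loop is one sync pass.
*/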
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free ||
spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)) {
/*
* If the log space map feature is active we don't
* care about deferred frees and the deferred bpobj
* as the log space map should effectively have the
* same results (i.e. appending only to one object).
*/
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We can not defer frees in pass 1, because
* we sync the deferred frees later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_alloc_cb,
&spa->spa_deferred_bpobj, tx);
}
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
svr_sync(spa, tx);
spa_sync_upgrades(spa, tx);
spa_flush_metaslabs(spa, tx);
vdev_t *vd = NULL;
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
!= NULL)
vdev_sync(vd, txg);
/*
* Note: We need to check if the MOS is dirty because we could
* have marked the MOS dirty without updating the uberblock
* (e.g. if we have sync tasks but no dirty user data). We need
* to check the uberblock's rootbp because it is updated if we
* have synced out dirty data (though in this case the MOS will
* most likely also be dirty due to second order effects, we
* don't want to rely on that here).
*/
if (pass == 1 &&
spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass, therefore this
* TXG is a no-op. Avoid syncing deferred frees, so
* that we can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
ASSERT(txg_list_empty(&dp->dp_early_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
} while (dmu_objset_is_dirty(mos, txg));
}
/*
* Rewrite the vdev configuration (which includes the uberblock) to
* commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few random
* top-level vdevs that are known to be visible in the config cache
* (see spa_vdev_add() for a complete description). If there *are* dirty
* vdevs, sync the uberblock to all vdevs.
*/
static void
spa_sync_rewrite_vdev_config(spa_t *spa, dmu_tx_t *tx)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg = tx->tx_txg;
for (;;) {
int error = 0;
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_SYNC_MIN_VDEVS] = { NULL };
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = random_in_range(children);
for (int c = 0; c < children; c++) {
vdev_t *vd =
rvd->vdev_child[(c0 + c) % children];
/* Stop when revisiting the first vdev */
if (c > 0 && svd[0] == vd)
break;
if (vd->vdev_ms_array == 0 ||
vd->vdev_islog ||
!vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_SYNC_MIN_VDEVS)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
}
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
vdev_t *vd = NULL;
VERIFY(spa_writeable(spa));
/*
* Wait for i/os issued in open context that need to complete
* before this txg syncs.
*/
(void) zio_wait(spa->spa_txg_zio[txg & TXG_MASK]);
spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL);
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while (list_head(&spa->spa_state_dirty_list) != NULL) {
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
dsl_pool_t *dp = spa->spa_dsl_pool;
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
vdev_t *rvd = spa->spa_root_vdev;
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
spa_sync_adjust_vdev_max_queue_depth(spa);
spa_sync_condense_indirect(spa, tx);
spa_sync_iterate_to_convergence(spa, tx);
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
if (spa->spa_vdev_removal != NULL) {
ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
}
spa_sync_rewrite_vdev_config(spa, tx);
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
for (int i = 0; i < spa->spa_alloc_count; i++) {
mutex_enter(&spa->spa_allocs[i].spaa_lock);
VERIFY0(avl_numnodes(&spa->spa_allocs[i].spaa_tree));
mutex_exit(&spa->spa_allocs[i].spaa_lock);
}
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)))
!= NULL)
vdev_sync_done(vd, txg);
metaslab_class_evict_old(spa->spa_normal_class, txg);
metaslab_class_evict_old(spa->spa_log_class, txg);
spa_sync_close_syncing_log_sm(spa);
spa_update_dspace(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
while (zfs_pause_spa_sync)
delay(1);
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
int i;
uint64_t spareguid;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&spareguid) == 0 && spareguid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check if a pool has an active shared spare device.
* Note: the reference count of an active spare is 2: once as a spare and once
* as a replacement.
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
uint64_t
spa_total_metaslabs(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t m = 0;
for (uint64_t c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
if (!vdev_is_concrete(vd))
continue;
m += vd->vdev_ms_count;
}
return (m);
}
/*
* Notify any waiting threads that some activity has switched from being in-
* progress to not-in-progress so that the thread can wake up and determine
* whether it is finished waiting.
*/
void
spa_notify_waiters(spa_t *spa)
{
/*
* Acquiring spa_activities_lock here prevents the cv_broadcast from
* happening between the waiting thread's check and cv_wait.
*/
mutex_enter(&spa->spa_activities_lock);
cv_broadcast(&spa->spa_activities_cv);
mutex_exit(&spa->spa_activities_lock);
}
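/*
 * Illustrative sketch (not part of the original source): a thread completing
 * an activity is expected to update its activity-specific in-memory state
 * first and only then call spa_notify_waiters(), e.g. for TRIM:
 *
 *	mutex_enter(&vd->vdev_trim_lock);
 *	vd->vdev_trim_state = <new, non-active state>;
 *	mutex_exit(&vd->vdev_trim_lock);
 *	spa_notify_waiters(spa);
 *
 * The ordering, not the particular lock or state, is the point; dropping
 * vdev_trim_lock before the notify is permitted, as described below.
 */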
/*
* Notify any waiting threads that the pool is exporting, and then block until
* they are finished using the spa_t.
*/
void
spa_wake_waiters(spa_t *spa)
{
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters_cancel = B_TRUE;
cv_broadcast(&spa->spa_activities_cv);
while (spa->spa_waiters != 0)
cv_wait(&spa->spa_waiters_cv, &spa->spa_activities_lock);
spa->spa_waiters_cancel = B_FALSE;
mutex_exit(&spa->spa_activities_lock);
}
/* Whether the vdev or any of its descendants are being initialized/trimmed. */
static boolean_t
spa_vdev_activity_in_progress_impl(vdev_t *vd, zpool_wait_activity_t activity)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER));
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
ASSERT(activity == ZPOOL_WAIT_INITIALIZE ||
activity == ZPOOL_WAIT_TRIM);
kmutex_t *lock = activity == ZPOOL_WAIT_INITIALIZE ?
&vd->vdev_initialize_lock : &vd->vdev_trim_lock;
mutex_exit(&spa->spa_activities_lock);
mutex_enter(lock);
mutex_enter(&spa->spa_activities_lock);
boolean_t in_progress = (activity == ZPOOL_WAIT_INITIALIZE) ?
(vd->vdev_initialize_state == VDEV_INITIALIZE_ACTIVE) :
(vd->vdev_trim_state == VDEV_TRIM_ACTIVE);
mutex_exit(lock);
if (in_progress)
return (B_TRUE);
for (int i = 0; i < vd->vdev_children; i++) {
if (spa_vdev_activity_in_progress_impl(vd->vdev_child[i],
activity))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* If use_guid is true, this checks whether the vdev specified by guid is
* being initialized/trimmed. Otherwise, it checks whether any vdev in the pool
* is being initialized/trimmed. The caller must hold the config lock and
* spa_activities_lock.
*/
static int
spa_vdev_activity_in_progress(spa_t *spa, boolean_t use_guid, uint64_t guid,
zpool_wait_activity_t activity, boolean_t *in_progress)
{
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
vdev_t *vd;
if (use_guid) {
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL || !vd->vdev_ops->vdev_op_leaf) {
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (EINVAL);
}
} else {
vd = spa->spa_root_vdev;
}
*in_progress = spa_vdev_activity_in_progress_impl(vd, activity);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (0);
}
/*
* Locking for waiting threads
* ---------------------------
*
* Waiting threads need a way to check whether a given activity is in progress,
* and then, if it is, wait for it to complete. Each activity will have some
* in-memory representation of the relevant on-disk state which can be used to
* determine whether or not the activity is in progress. The in-memory state and
* the locking used to protect it will be different for each activity, and may
* not be suitable for use with a cvar (e.g., some state is protected by the
* config lock). To allow waiting threads to wait without any races, another
* lock, spa_activities_lock, is used.
*
* When the state is checked, both the activity-specific lock (if there is one)
* and spa_activities_lock are held. In some cases, the activity-specific lock
* is acquired explicitly (e.g. the config lock). In others, the locking is
* internal to some check (e.g. bpobj_is_empty). After checking, the waiting
* thread releases the activity-specific lock and, if the activity is in
* progress, then cv_waits using spa_activities_lock.
*
* The waiting thread is woken when another thread, one completing some
* activity, updates the state of the activity and then calls
* spa_notify_waiters, which will cv_broadcast. This 'completing' thread only
* needs to hold its activity-specific lock when updating the state, and this
* lock can (but doesn't have to) be dropped before calling spa_notify_waiters.
*
* Because spa_notify_waiters acquires spa_activities_lock before broadcasting,
* and because it is held when the waiting thread checks the state of the
* activity, it can never be the case that the completing thread both updates
* the activity state and cv_broadcasts in between the waiting thread's check
* and cv_wait. Thus, a waiting thread can never miss a wakeup.
*
* In order to prevent deadlock, when the waiting thread does its check, in some
* cases it will temporarily drop spa_activities_lock in order to acquire the
* activity-specific lock. The order in which spa_activities_lock and the
* activity specific lock are acquired in the waiting thread is determined by
* the order in which they are acquired in the completing thread; if the
* completing thread calls spa_notify_waiters with the activity-specific lock
* held, then the waiting thread must also acquire the activity-specific lock
* first.
*/
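/*
 * Illustrative sketch (not part of the original source) of the waiter-side
 * shape these rules produce, paraphrasing spa_wait_common() below:
 *
 *	mutex_enter(&spa->spa_activities_lock);
 *	for (;;) {
 *		check the activity state, briefly dropping and retaking
 *		    spa_activities_lock if an activity-specific lock must
 *		    be taken first;
 *		if (!in_progress)
 *			break;
 *		cv_wait(&spa->spa_activities_cv, &spa->spa_activities_lock);
 *	}
 *	mutex_exit(&spa->spa_activities_lock);
 *
 * Because both the check and the cv_wait happen under spa_activities_lock,
 * a cv_broadcast from spa_notify_waiters() cannot fall between them.
 */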
static int
spa_activity_in_progress(spa_t *spa, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *in_progress)
{
int error = 0;
ASSERT(MUTEX_HELD(&spa->spa_activities_lock));
switch (activity) {
case ZPOOL_WAIT_CKPT_DISCARD:
*in_progress =
(spa_feature_is_active(spa, SPA_FEATURE_POOL_CHECKPOINT) &&
zap_contains(spa_meta_objset(spa),
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ZPOOL_CHECKPOINT) ==
ENOENT);
break;
case ZPOOL_WAIT_FREE:
*in_progress = ((spa_version(spa) >= SPA_VERSION_DEADLISTS &&
!bpobj_is_empty(&spa->spa_dsl_pool->dp_free_bpobj)) ||
spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY) ||
spa_livelist_delete_check(spa));
break;
case ZPOOL_WAIT_INITIALIZE:
case ZPOOL_WAIT_TRIM:
error = spa_vdev_activity_in_progress(spa, use_tag, tag,
activity, in_progress);
break;
case ZPOOL_WAIT_REPLACE:
mutex_exit(&spa->spa_activities_lock);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
mutex_enter(&spa->spa_activities_lock);
*in_progress = vdev_replace_in_progress(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
break;
case ZPOOL_WAIT_REMOVE:
*in_progress = (spa->spa_removing_phys.sr_state ==
DSS_SCANNING);
break;
case ZPOOL_WAIT_RESILVER:
if ((*in_progress = vdev_rebuild_active(spa->spa_root_vdev)))
break;
- /* fall through */
+ fallthrough;
case ZPOOL_WAIT_SCRUB:
{
boolean_t scanning, paused, is_scrub;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
is_scrub = (scn->scn_phys.scn_func == POOL_SCAN_SCRUB);
scanning = (scn->scn_phys.scn_state == DSS_SCANNING);
paused = dsl_scan_is_paused_scrub(scn);
*in_progress = (scanning && !paused &&
is_scrub == (activity == ZPOOL_WAIT_SCRUB));
break;
}
default:
panic("unrecognized value for activity %d", activity);
}
return (error);
}
static int
spa_wait_common(const char *pool, zpool_wait_activity_t activity,
boolean_t use_tag, uint64_t tag, boolean_t *waited)
{
/*
* The tag is used to distinguish between instances of an activity.
* 'initialize' and 'trim' are the only activities that we use this for.
* The other activities can only have a single instance in progress in a
* pool at one time, making the tag unnecessary.
*
* There can be multiple devices being replaced at once, but since they
* all finish once resilvering finishes, we don't bother keeping track
* of them individually, we just wait for them all to finish.
*/
if (use_tag && activity != ZPOOL_WAIT_INITIALIZE &&
activity != ZPOOL_WAIT_TRIM)
return (EINVAL);
if (activity < 0 || activity >= ZPOOL_WAIT_NUM_ACTIVITIES)
return (EINVAL);
spa_t *spa;
int error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
/*
* Increment the spa's waiter count so that we can call spa_close and
* still ensure that the spa_t doesn't get freed before this thread is
* finished with it when the pool is exported. We want to call spa_close
* before we start waiting because otherwise the additional ref would
* prevent the pool from being exported or destroyed throughout the
* potentially long wait.
*/
mutex_enter(&spa->spa_activities_lock);
spa->spa_waiters++;
spa_close(spa, FTAG);
*waited = B_FALSE;
for (;;) {
boolean_t in_progress;
error = spa_activity_in_progress(spa, activity, use_tag, tag,
&in_progress);
if (error || !in_progress || spa->spa_waiters_cancel)
break;
*waited = B_TRUE;
if (cv_wait_sig(&spa->spa_activities_cv,
&spa->spa_activities_lock) == 0) {
error = EINTR;
break;
}
}
spa->spa_waiters--;
cv_signal(&spa->spa_waiters_cv);
mutex_exit(&spa->spa_activities_lock);
return (error);
}
/*
* Wait for a particular instance of the specified activity to complete, where
* the instance is identified by 'tag'.
*/
int
spa_wait_tag(const char *pool, zpool_wait_activity_t activity, uint64_t tag,
boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_TRUE, tag, waited));
}
/*
* Wait for all instances of the specified activity to complete.
*/
int
spa_wait(const char *pool, zpool_wait_activity_t activity, boolean_t *waited)
{
return (spa_wait_common(pool, activity, B_FALSE, 0, waited));
}
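/*
 * Illustrative usage (not part of the original source), e.g. from a caller
 * that wants to block until a scrub of a hypothetical pool "tank" finishes:
 *
 *	boolean_t waited;
 *	int err = spa_wait("tank", ZPOOL_WAIT_SCRUB, &waited);
 *
 * On success, 'waited' reports whether the thread actually had to block.
 */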
sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
#endif
return (ev);
}
void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
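/*
 * Illustrative usage (not part of the original source):
 *
 *	spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
 *
 * where ESC_ZFS_VDEV_REMOVE is assumed here to be one of the event names
 * defined in sys/sysevent/eventdefs.h, per the comment above.
 */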
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_shift, INT, ZMOD_RW,
"log2 fraction of arc that can be used by inflight I/Os when "
"verifying pool during import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_metadata, INT, ZMOD_RW,
"Set to traverse metadata on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_verify_data, INT, ZMOD_RW,
"Set to traverse data on pool import");
ZFS_MODULE_PARAM(zfs_spa, spa_, load_print_vdev_tree, INT, ZMOD_RW,
"Print vdev tree to zfs_dbgmsg during pool import");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_pct, UINT, ZMOD_RD,
"Percentage of CPUs to run an IO worker thread");
ZFS_MODULE_PARAM(zfs_zio, zio_, taskq_batch_tpq, UINT, ZMOD_RD,
"Number of threads per IO worker taskqueue");
ZFS_MODULE_PARAM(zfs, zfs_, max_missing_tvds, ULONG, ZMOD_RW,
"Allow importing pool with up to this number of missing top-level "
"vdevs (in read-only mode)");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_pause, INT, ZMOD_RW,
"Set the livelist condense zthr to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_pause, INT, ZMOD_RW,
"Set the livelist condense synctask to pause");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, sync_cancel, INT, ZMOD_RW,
"Whether livelist condensing was canceled in the synctask");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, zthr_cancel, INT, ZMOD_RW,
"Whether livelist condensing was canceled in the zthr function");
ZFS_MODULE_PARAM(zfs_livelist_condense, zfs_livelist_condense_, new_alloc, INT, ZMOD_RW,
"Whether extra ALLOC blkptrs were added to a livelist entry while it "
"was being condensed");
/* END CSTYLED */
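/*
 * Note (not part of the original source): on Linux, each ZFS_MODULE_PARAM
 * entry above is expected to surface as a module parameter, e.g.
 *
 *	/sys/module/zfs/parameters/zfs_max_missing_tvds
 *
 * with ZMOD_RW parameters writable at runtime and ZMOD_RD ones read-only.
 */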
diff --git a/sys/contrib/openzfs/module/zfs/vdev_label.c b/sys/contrib/openzfs/module/zfs/vdev_label.c
index cdb4cb6e565f..f03ae0873f6c 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_label.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_label.c
@@ -1,2010 +1,2010 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2020 by Delphix. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
*/
/*
* Virtual Device Labels
* ---------------------
*
* The vdev label serves several distinct purposes:
*
* 1. Uniquely identify this device as part of a ZFS pool and confirm its
* identity within the pool.
*
* 2. Verify that all the devices given in a configuration are present
* within the pool.
*
* 3. Determine the uberblock for the pool.
*
* 4. In case of an import operation, determine the configuration of the
* toplevel vdev of which it is a part.
*
* 5. If an import operation cannot find all the devices in the pool,
* provide enough information to the administrator to determine which
* devices are missing.
*
* It is important to note that while the kernel is responsible for writing the
* label, it only consumes the information in the first three cases. The
* latter information is only consumed in userland when determining the
* configuration to import a pool.
*
*
* Label Organization
* ------------------
*
* Before describing the contents of the label, it's important to understand how
* the labels are written and updated with respect to the uberblock.
*
* When the pool configuration is altered, either because it was newly created
* or a device was added, we want to update all the labels such that we can deal
* with fatal failure at any point. To this end, each disk has two labels which
* are updated before and after the uberblock is synced. Assuming we have
* labels and an uberblock with the following transaction groups:
*
* L1 UB L2
* +------+ +------+ +------+
* | | | | | |
* | t10 | | t10 | | t10 |
* | | | | | |
* +------+ +------+ +------+
*
* In this stable state, the labels and the uberblock were all updated within
* the same transaction group (10). Each label is mirrored and checksummed, so
* that we can detect when we fail partway through writing the label.
*
* In order to identify which labels are valid, the labels are written in the
* following manner:
*
* 1. For each vdev, update 'L1' to the new label
* 2. Update the uberblock
* 3. For each vdev, update 'L2' to the new label
*
* Given arbitrary failure, we can determine the correct label to use based on
* the transaction group. If we fail after updating L1 but before updating the
* UB, we will notice that L1's transaction group is greater than the uberblock,
* so L2 must be valid. If we fail after writing the uberblock but before
* writing L2, we will notice that L2's transaction group is less than L1, and
* therefore L1 is valid.
*
* Another added complexity is that not every label is updated when the config
* is synced. If we add a single device, we do not want to have to re-write
* every label for every device in the pool. This means that both L1 and L2 may
* be older than the pool uberblock, because the necessary information is stored
* on another vdev.
*
*
* On-disk Format
* --------------
*
* The vdev label consists of two distinct parts, and is wrapped within the
* vdev_label_t structure. The label includes 8k of padding to permit legacy
* VTOC disk labels, but is otherwise ignored.
*
* The first half of the label is a packed nvlist which contains pool wide
* properties, per-vdev properties, and configuration information. It is
* described in more detail below.
*
* The latter half of the label consists of a redundant array of uberblocks.
* These uberblocks are updated whenever a transaction group is committed,
* or when the configuration is updated. When a pool is loaded, we scan each
* vdev for the 'best' uberblock.
*
*
* Configuration Information
* -------------------------
*
* The nvlist describing the pool and vdev contains the following elements:
*
* version ZFS on-disk version
* name Pool name
* state Pool state
* txg Transaction group in which this label was written
* pool_guid Unique identifier for this pool
* vdev_tree An nvlist describing vdev tree.
* features_for_read
* An nvlist of the features necessary for reading the MOS.
*
* Each leaf device label also contains the following:
*
* top_guid Unique ID for top-level vdev in which this is contained
* guid Unique ID for the leaf vdev
*
* The 'vs' configuration follows the format described in 'spa_config.c'.
*/
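/*
 * Illustrative sketch (not part of the original source) of how such a config
 * nvlist for a leaf vdev typically renders (zdb -l style; values made up):
 *
 *	version: 5000
 *	name: 'tank'
 *	state: 0
 *	txg: 4
 *	pool_guid: 1234567890
 *	top_guid: 987654321
 *	guid: 1122334455
 *	vdev_tree:
 *	    type: 'disk'
 *	    id: 0
 *	    path: '/dev/sda1'
 *
 * Exactly which keys appear depends on the vdev and on how the label was
 * written, as vdev_config_generate() below shows.
 */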
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_draid.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/byteorder.h>
#include <sys/zfs_bootenv.h>
/*
* Basic routines to read and write from a vdev label.
* Used throughout the rest of this file.
*/
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
ASSERT(offset < sizeof (vdev_label_t));
ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}
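/*
 * Worked example (not part of the original source), assuming the usual
 * VDEV_LABELS == 4 and a 256K vdev_label_t: labels 0 and 1 sit at the front
 * of the device and labels 2 and 3 at the end, so for an intra-label offset
 * 'off':
 *
 *	vdev_label_offset(psize, 0, off) == off
 *	vdev_label_offset(psize, 1, off) == 256K + off
 *	vdev_label_offset(psize, 2, off) == psize - 512K + off
 *	vdev_label_offset(psize, 3, off) == psize - 256K + off
 */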
/*
* Returns the index of the vdev label containing the given offset, or -1 if
* the offset does not fall within any label.
*/
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
int l;
if (offset >= psize - VDEV_LABEL_END_SIZE) {
offset -= psize - VDEV_LABEL_END_SIZE;
offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
}
l = offset / sizeof (vdev_label_t);
return (l < VDEV_LABELS ? l : -1);
}
static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_read_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}
void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_write_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}
/*
* Generate the nvlist representing this vdev's stats
*/
void
vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
{
nvlist_t *nvx;
vdev_stat_t *vs;
vdev_stat_ex_t *vsx;
vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);
vdev_get_stats_ex(vd, vs, vsx);
fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));
/*
* Add extended stats into a special extended stats nvlist. This keeps
* all the extended stats nicely grouped together. The extended stats
* nvlist is then added to the main nvlist.
*/
nvx = fnvlist_alloc();
/* ZIOs in flight to disk */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_TRIM]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_REBUILD]);
/* ZIOs pending */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_TRIM_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_TRIM]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_REBUILD_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_REBUILD]);
/* Histograms */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TRIM_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_REBUILD_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_REBUILD]));
/* Request sizes */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_TRIM_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_REBUILD_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_REBUILD]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_TRIM_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_TRIM]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_REBUILD_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_REBUILD]));
/* IO delays */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SLOW_IOS, vs->vs_slow_ios);
/* Add extended stats nvlist to main nvlist */
fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);
fnvlist_free(nvx);
kmem_free(vs, sizeof (*vs));
kmem_free(vsx, sizeof (*vsx));
}
static void
root_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
spa_t *spa = vd->vdev_spa;
if (vd != spa->spa_root_vdev)
return;
/* provide either current or previous scan information */
pool_scan_stat_t ps;
if (spa_scan_get_stats(spa, &ps) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
sizeof (pool_scan_stat_t) / sizeof (uint64_t));
}
pool_removal_stat_t prs;
if (spa_removal_get_stats(spa, &prs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
sizeof (prs) / sizeof (uint64_t));
}
pool_checkpoint_stat_t pcs;
if (spa_checkpoint_get_stats(spa, &pcs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_CHECKPOINT_STATS, (uint64_t *)&pcs,
sizeof (pcs) / sizeof (uint64_t));
}
}
static void
top_vdev_actions_getprogress(vdev_t *vd, nvlist_t *nvl)
{
if (vd == vd->vdev_top) {
vdev_rebuild_stat_t vrs;
if (vdev_rebuild_get_stats(vd, &vrs) == 0) {
fnvlist_add_uint64_array(nvl,
ZPOOL_CONFIG_REBUILD_STATS, (uint64_t *)&vrs,
sizeof (vrs) / sizeof (uint64_t));
}
}
}
/*
* Generate the nvlist representing this vdev's config.
*/
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
vdev_config_flag_t flags)
{
nvlist_t *nv = NULL;
vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
nv = fnvlist_alloc();
fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);
if (vd->vdev_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);
if (vd->vdev_devid != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
if (vd->vdev_physpath != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
if (vd->vdev_fru != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);
if (vd->vdev_ops->vdev_op_config_generate != NULL)
vd->vdev_ops->vdev_op_config_generate(vd, nv);
if (vd->vdev_wholedisk != -1ULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
vd->vdev_wholedisk);
}
if (vd->vdev_not_present && !(flags & VDEV_CONFIG_MISSING))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
if (vd->vdev_isspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
vd == vd->vdev_top) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
vd->vdev_ms_array);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
vd->vdev_ms_shift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
if (vd->vdev_removing) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
vd->vdev_removing);
}
/* zpool command expects alloc class data */
if (getstats && vd->vdev_alloc_bias != VDEV_BIAS_NONE) {
const char *bias = NULL;
switch (vd->vdev_alloc_bias) {
case VDEV_BIAS_LOG:
bias = VDEV_ALLOC_BIAS_LOG;
break;
case VDEV_BIAS_SPECIAL:
bias = VDEV_ALLOC_BIAS_SPECIAL;
break;
case VDEV_BIAS_DEDUP:
bias = VDEV_ALLOC_BIAS_DEDUP;
break;
default:
ASSERT3U(vd->vdev_alloc_bias, ==,
VDEV_BIAS_NONE);
}
fnvlist_add_string(nv, ZPOOL_CONFIG_ALLOCATION_BIAS,
bias);
}
}
if (vd->vdev_dtl_sm != NULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
space_map_object(vd->vdev_dtl_sm));
}
if (vic->vic_mapping_object != 0) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
vic->vic_mapping_object);
}
if (vic->vic_births_object != 0) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
vic->vic_births_object);
}
if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
vic->vic_prev_indirect_vdev);
}
if (vd->vdev_crtxg)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
if (vd->vdev_expansion_time)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_EXPANSION_TIME,
vd->vdev_expansion_time);
if (flags & VDEV_CONFIG_MOS) {
if (vd->vdev_leaf_zap != 0) {
ASSERT(vd->vdev_ops->vdev_op_leaf);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
vd->vdev_leaf_zap);
}
if (vd->vdev_top_zap != 0) {
ASSERT(vd == vd->vdev_top);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
vd->vdev_top_zap);
}
if (vd->vdev_resilver_deferred) {
ASSERT(vd->vdev_ops->vdev_op_leaf);
ASSERT(spa->spa_resilver_deferred);
fnvlist_add_boolean(nv, ZPOOL_CONFIG_RESILVER_DEFER);
}
}
if (getstats) {
vdev_config_generate_stats(vd, nv);
root_vdev_actions_getprogress(vd, nv);
top_vdev_actions_getprogress(vd, nv);
/*
* Note: this can be called from open context
* (spa_get_stats()), so we need the rwlock to prevent
* the mapping from being changed by condensing.
*/
rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
if (vd->vdev_indirect_mapping != NULL) {
ASSERT(vd->vdev_indirect_births != NULL);
vdev_indirect_mapping_t *vim =
vd->vdev_indirect_mapping;
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
vdev_indirect_mapping_size(vim));
}
rw_exit(&vd->vdev_indirect_rwlock);
if (vd->vdev_mg != NULL &&
vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
/*
* Compute approximately how much memory would be used
* for the indirect mapping if this device were to
* be removed.
*
* Note: If the frag metric is invalid, then not
* enough metaslabs have been converted to have
* histograms.
*/
uint64_t seg_count = 0;
uint64_t to_alloc = vd->vdev_stat.vs_alloc;
/*
* There are the same number of allocated segments
* as free segments, so we will have at least one
* entry per free segment. However, small free
* segments (smaller than vdev_removal_max_span)
* will be combined with adjacent allocated segments
* as a single mapping.
*/
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (i + 1 < highbit64(vdev_removal_max_span)
- 1) {
to_alloc +=
vd->vdev_mg->mg_histogram[i] <<
(i + 1);
} else {
seg_count +=
vd->vdev_mg->mg_histogram[i];
}
}
/*
* The maximum length of a mapping is
* zfs_remove_max_segment, so we need at least one entry
* per zfs_remove_max_segment of allocated data.
*/
seg_count += to_alloc / spa_remove_max_segment(spa);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
seg_count *
sizeof (vdev_indirect_mapping_entry_phys_t));
}
}
if (!vd->vdev_ops->vdev_op_leaf) {
nvlist_t **child;
int c, idx;
ASSERT(!vd->vdev_ishole);
child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
for (c = 0, idx = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
/*
* If we're generating an nvlist of removing
* vdevs then skip over any device which is
* not being removed.
*/
if ((flags & VDEV_CONFIG_REMOVING) &&
!cvd->vdev_removing)
continue;
child[idx++] = vdev_config_generate(spa, cvd,
getstats, flags);
}
if (idx) {
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
child, idx);
}
for (c = 0; c < idx; c++)
nvlist_free(child[c]);
kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
} else {
const char *aux = NULL;
if (vd->vdev_offline && !vd->vdev_tmpoffline)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
if (vd->vdev_resilver_txg != 0)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
vd->vdev_resilver_txg);
if (vd->vdev_rebuild_txg != 0)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REBUILD_TXG,
vd->vdev_rebuild_txg);
if (vd->vdev_faulted)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
if (vd->vdev_degraded)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
if (vd->vdev_removed)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
if (vd->vdev_unspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
if (vd->vdev_ishole)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);
/* Set the reason why we're FAULTED/DEGRADED. */
switch (vd->vdev_stat.vs_aux) {
case VDEV_AUX_ERR_EXCEEDED:
aux = "err_exceeded";
break;
case VDEV_AUX_EXTERNAL:
aux = "external";
break;
}
if (aux != NULL && !vd->vdev_tmpoffline) {
fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
} else {
/*
* We're healthy - clear any previous AUX_STATE values.
*/
if (nvlist_exists(nv, ZPOOL_CONFIG_AUX_STATE))
nvlist_remove_all(nv, ZPOOL_CONFIG_AUX_STATE);
}
if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
vd->vdev_orig_guid);
}
}
return (nv);
}
/*
* Generate a view of the top-level vdevs. If we currently have holes
* in the namespace, then generate an array which contains a list of holey
* vdevs. Additionally, add the number of top-level children that currently
* exist.
*/
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *array;
uint_t c, idx;
array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_ishole) {
array[idx++] = c;
}
}
if (idx) {
VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
array, idx) == 0);
}
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
rvd->vdev_children) == 0);
kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
/*
* Returns the configuration from the label of the given vdev. For vdevs
* which don't have a txg value stored on their label (i.e. spares/cache)
* or have not been completely initialized (txg = 0), just return
* the configuration from the first valid label we find. Otherwise,
* find the most up-to-date label that does not exceed the specified
* 'txg' value.
*/
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *config = NULL;
vdev_phys_t *vp[VDEV_LABELS];
abd_t *vp_abd[VDEV_LABELS];
zio_t *zio[VDEV_LABELS];
uint64_t best_txg = 0;
uint64_t label_txg = 0;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(vd->vdev_validate_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (!vdev_readable(vd))
return (NULL);
/*
* The label for a dRAID distributed spare is not stored on disk.
* Instead it is generated when needed which allows us to bypass
* the pipeline when reading the config from the label.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return (vdev_draid_read_config_spare(vd));
for (int l = 0; l < VDEV_LABELS; l++) {
vp_abd[l] = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
vp[l] = abd_to_buf(vp_abd[l]);
}
retry:
for (int l = 0; l < VDEV_LABELS; l++) {
zio[l] = zio_root(spa, NULL, NULL, flags);
vdev_label_read(zio[l], vd, l, vp_abd[l],
offsetof(vdev_label_t, vl_vdev_phys), sizeof (vdev_phys_t),
NULL, NULL, flags);
}
for (int l = 0; l < VDEV_LABELS; l++) {
nvlist_t *label = NULL;
if (zio_wait(zio[l]) == 0 &&
nvlist_unpack(vp[l]->vp_nvlist, sizeof (vp[l]->vp_nvlist),
&label, 0) == 0) {
/*
* Auxiliary vdevs won't have txg values in their
* labels and newly added vdevs may not have been
* completely initialized so just return the
* configuration from the first valid label we
* encounter.
*/
error = nvlist_lookup_uint64(label,
ZPOOL_CONFIG_POOL_TXG, &label_txg);
if ((error || label_txg == 0) && !config) {
config = label;
for (l++; l < VDEV_LABELS; l++)
zio_wait(zio[l]);
break;
} else if (label_txg <= txg && label_txg > best_txg) {
best_txg = label_txg;
nvlist_free(config);
config = fnvlist_dup(label);
}
}
if (label != NULL) {
nvlist_free(label);
label = NULL;
}
}
if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
/*
* We found a valid label but it didn't pass txg restrictions.
*/
if (config == NULL && label_txg != 0) {
vdev_dbgmsg(vd, "label discarded as txg is too large "
"(%llu > %llu)", (u_longlong_t)label_txg,
(u_longlong_t)txg);
}
for (int l = 0; l < VDEV_LABELS; l++) {
abd_free(vp_abd[l]);
}
return (config);
}
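/*
 * Illustrative example (not part of the original source): with 'txg' == 10
 * and the four labels carrying txgs 11, 10, 9 and 9, the label with txg 10
 * is returned, being the newest label that does not exceed the requested
 * txg. A label with no txg or txg 0 (spares/cache, partially initialized
 * vdevs) is returned as-is only if no config has been selected yet.
 */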
/*
* Determine if a device is in use. The 'spare_guid' parameter will be filled
* in with the device guid if this spare is active elsewhere on the system.
*/
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
uint64_t *spare_guid, uint64_t *l2cache_guid)
{
spa_t *spa = vd->vdev_spa;
uint64_t state, pool_guid, device_guid, txg, spare_pool;
uint64_t vdtxg = 0;
nvlist_t *label;
if (spare_guid)
*spare_guid = 0ULL;
if (l2cache_guid)
*l2cache_guid = 0ULL;
/*
* Read the label, if any, and perform some basic sanity checks.
*/
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
return (B_FALSE);
(void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
&vdtxg);
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
&device_guid) != 0) {
nvlist_free(label);
return (B_FALSE);
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0)) {
nvlist_free(label);
return (B_FALSE);
}
nvlist_free(label);
/*
* Check to see if this device indeed belongs to the pool it claims to
* be a part of. The only way this is allowed is if the device is a hot
* spare (which we check for later on).
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
!spa_guid_exists(pool_guid, device_guid) &&
!spa_spare_exists(device_guid, NULL, NULL) &&
!spa_l2cache_exists(device_guid, NULL))
return (B_FALSE);
/*
* If the transaction group is zero, then this is an initialized (but
* unused) label. This is only an error if the create transaction
* on-disk is the same as the one we're using now, in which case the
* user has attempted to add the same vdev multiple times in the same
* transaction.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
txg == 0 && vdtxg == crtxg)
return (B_TRUE);
/*
* Check to see if this is a spare device. We do an explicit check for
* spa_has_spare() here because it may be on our pending list of spares
* to add. We also check if it is an l2cache device.
*/
if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
spa_has_spare(spa, device_guid)) {
if (spare_guid)
*spare_guid = device_guid;
switch (reason) {
case VDEV_LABEL_CREATE:
case VDEV_LABEL_L2CACHE:
return (B_TRUE);
case VDEV_LABEL_REPLACE:
return (!spa_has_spare(spa, device_guid) ||
spare_pool != 0ULL);
case VDEV_LABEL_SPARE:
return (spa_has_spare(spa, device_guid));
default:
break;
}
}
/*
* Check to see if this is an l2cache device.
*/
if (spa_l2cache_exists(device_guid, NULL))
return (B_TRUE);
/*
* We can't rely on a pool's state if it's been imported
* read-only. Instead we look to see if the pool is marked
* read-only in the namespace and set the state to active.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
spa_mode(spa) == SPA_MODE_READ)
state = POOL_STATE_ACTIVE;
/*
* If the device is marked ACTIVE, then this device is in use by another
* pool on the system.
*/
return (state == POOL_STATE_ACTIVE);
}
/*
* Initialize a vdev label. We check to make sure each leaf device is not in
* use, and writable. We put down an initial label which we will later
* overwrite with a complete label. Note that it's important to do this
* sequentially, not in parallel, so that we catch cases of multiple use of the
* same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
* itself.
*/
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
abd_t *bootenv;
uberblock_t *ub;
abd_t *ub_abd;
zio_t *zio;
char *buf;
size_t buflen;
int error;
uint64_t spare_guid = 0, l2cache_guid = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
for (int c = 0; c < vd->vdev_children; c++)
if ((error = vdev_label_init(vd->vdev_child[c],
crtxg, reason)) != 0)
return (error);
/* Track the creation time for this vdev */
vd->vdev_crtxg = crtxg;
if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
return (0);
/*
* Dead vdevs cannot be initialized.
*/
if (vdev_is_dead(vd))
return (SET_ERROR(EIO));
/*
* Determine if the vdev is in use.
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
return (SET_ERROR(EBUSY));
/*
* If this is a request to add or replace a spare or l2cache device
* that is in use elsewhere on the system, then we must update the
* guid (which was initialized to a random value) to reflect the
* actual GUID (which is shared between multiple pools).
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
spare_guid != 0ULL) {
uint64_t guid_delta = spare_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fallthrough to the
* rest of the code. If we're adding a spare, then it's already
* labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_SPARE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE ||
reason == VDEV_LABEL_SPLIT);
}
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
l2cache_guid != 0ULL) {
uint64_t guid_delta = l2cache_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fallthrough to the
* rest of the code. If we're adding an l2cache, then it's
* already labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_L2CACHE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE);
}
/*
* Initialize its label.
*/
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
/*
* Generate a label describing the pool and our top-level vdev.
* We mark it as being from txg 0 to indicate that it's not
* really part of an active pool just yet. The labels will
* be written again with a meaningful txg by spa_sync().
*/
if (reason == VDEV_LABEL_SPARE ||
(reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
/*
* For inactive hot spares, we generate a special label that
* identifies as a mutually shared hot spare. We write the
* label if we are adding a hot spare, or if we are removing an
* active hot spare (in which case we want to revert the
* labels).
*/
VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
POOL_STATE_SPARE) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
} else if (reason == VDEV_LABEL_L2CACHE ||
(reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
/*
* For level 2 ARC devices, add a special label.
*/
VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
POOL_STATE_L2CACHE) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
} else {
uint64_t txg = 0ULL;
if (reason == VDEV_LABEL_SPLIT)
txg = spa->spa_uberblock.ub_txg;
label = spa_config_generate(spa, vd, txg, B_FALSE);
/*
* Add our creation time. This allows us to detect multiple
* vdev uses as described above, and automatically expires if we
* fail.
*/
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
crtxg) == 0);
}
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
if (error != 0) {
nvlist_free(label);
abd_free(vp_abd);
/* EFAULT means nvlist_pack ran out of room */
return (SET_ERROR(error == EFAULT ? ENAMETOOLONG : EINVAL));
}
/*
* Initialize uberblock template.
*/
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
ub = abd_to_buf(ub_abd);
ub->ub_txg = 0;
/* Initialize the 2nd padding area. */
bootenv = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
abd_zero(bootenv, VDEV_PAD_SIZE);
/*
* Write everything in parallel.
*/
retry:
zio = zio_root(spa, NULL, NULL, flags);
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t), NULL, NULL, flags);
/*
* Skip the 1st padding area.
* Zero out the 2nd padding area where it might have
* left over data from a previous filesystem format.
*/
vdev_label_write(zio, vd, l, bootenv,
offsetof(vdev_label_t, vl_be),
VDEV_PAD_SIZE, NULL, NULL, flags);
vdev_label_write(zio, vd, l, ub_abd,
offsetof(vdev_label_t, vl_uberblock),
VDEV_UBERBLOCK_RING, NULL, NULL, flags);
}
error = zio_wait(zio);
if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
nvlist_free(label);
abd_free(bootenv);
abd_free(ub_abd);
abd_free(vp_abd);
/*
* If this vdev hasn't been previously identified as a spare, then we
* mark it as such only if a) we are labeling it as a spare, or b) it
* exists as a spare elsewhere in the system. Do the same for
* level 2 ARC devices.
*/
if (error == 0 && !vd->vdev_isspare &&
(reason == VDEV_LABEL_SPARE ||
spa_spare_exists(vd->vdev_guid, NULL, NULL)))
spa_spare_add(vd);
if (error == 0 && !vd->vdev_isl2cache &&
(reason == VDEV_LABEL_L2CACHE ||
spa_l2cache_exists(vd->vdev_guid, NULL)))
spa_l2cache_add(vd);
return (error);
}
/*
* Done callback for vdev_label_read_bootenv_impl. If this is the first
* callback to finish, store our abd in the callback pointer. Otherwise, we
* just free our abd and return.
*/
static void
vdev_label_read_bootenv_done(zio_t *zio)
{
zio_t *rio = zio->io_private;
abd_t **cbp = rio->io_private;
ASSERT3U(zio->io_size, ==, VDEV_PAD_SIZE);
if (zio->io_error == 0) {
mutex_enter(&rio->io_lock);
if (*cbp == NULL) {
/* Will free this buffer in vdev_label_read_bootenv. */
*cbp = zio->io_abd;
} else {
abd_free(zio->io_abd);
}
mutex_exit(&rio->io_lock);
} else {
abd_free(zio->io_abd);
}
}
static void
vdev_label_read_bootenv_impl(zio_t *zio, vdev_t *vd, int flags)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_label_read_bootenv_impl(zio, vd->vdev_child[c], flags);
/*
* We just use the first label that has a correct checksum; the
* bootloader should have rewritten them all to be the same on boot,
* and any changes we made since boot have been the same across all
* labels.
*/
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_PAD_SIZE, B_FALSE),
offsetof(vdev_label_t, vl_be), VDEV_PAD_SIZE,
vdev_label_read_bootenv_done, zio, flags);
}
}
}
int
vdev_label_read_bootenv(vdev_t *rvd, nvlist_t *bootenv)
{
nvlist_t *config;
spa_t *spa = rvd->vdev_spa;
abd_t *abd = NULL;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
ASSERT(bootenv);
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
zio_t *zio = zio_root(spa, NULL, &abd, flags);
vdev_label_read_bootenv_impl(zio, rvd, flags);
int err = zio_wait(zio);
if (abd != NULL) {
char *buf;
vdev_boot_envblock_t *vbe = abd_to_buf(abd);
vbe->vbe_version = ntohll(vbe->vbe_version);
switch (vbe->vbe_version) {
case VB_RAW:
/*
* if we have textual data in vbe_bootenv, create nvlist
* with key "envmap".
*/
fnvlist_add_uint64(bootenv, BOOTENV_VERSION, VB_RAW);
vbe->vbe_bootenv[sizeof (vbe->vbe_bootenv) - 1] = '\0';
fnvlist_add_string(bootenv, GRUB_ENVMAP,
vbe->vbe_bootenv);
break;
case VB_NVLIST:
err = nvlist_unpack(vbe->vbe_bootenv,
sizeof (vbe->vbe_bootenv), &config, 0);
if (err == 0) {
fnvlist_merge(bootenv, config);
nvlist_free(config);
break;
}
- /* FALLTHROUGH */
+ fallthrough;
default:
/* Check for FreeBSD zfs bootonce command string */
buf = abd_to_buf(abd);
if (*buf == '\0') {
fnvlist_add_uint64(bootenv, BOOTENV_VERSION,
VB_NVLIST);
break;
}
fnvlist_add_string(bootenv, FREEBSD_BOOTONCE, buf);
}
/*
* abd was allocated in vdev_label_read_bootenv_impl()
*/
abd_free(abd);
/*
* If we managed to read any successfully,
* return success.
*/
return (0);
}
return (err);
}
int
vdev_label_write_bootenv(vdev_t *vd, nvlist_t *env)
{
zio_t *zio;
spa_t *spa = vd->vdev_spa;
vdev_boot_envblock_t *bootenv;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
int error;
size_t nvsize;
char *nvbuf;
error = nvlist_size(env, &nvsize, NV_ENCODE_XDR);
if (error != 0)
return (SET_ERROR(error));
if (nvsize >= sizeof (bootenv->vbe_bootenv)) {
return (SET_ERROR(E2BIG));
}
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
error = ENXIO;
for (int c = 0; c < vd->vdev_children; c++) {
int child_err;
child_err = vdev_label_write_bootenv(vd->vdev_child[c], env);
/*
* As long as any of the disks managed to write all of their
* labels successfully, return success.
*/
if (child_err == 0)
error = child_err;
}
if (!vd->vdev_ops->vdev_op_leaf || vdev_is_dead(vd) ||
!vdev_writeable(vd)) {
return (error);
}
ASSERT3U(sizeof (*bootenv), ==, VDEV_PAD_SIZE);
abd_t *abd = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
abd_zero(abd, VDEV_PAD_SIZE);
bootenv = abd_borrow_buf_copy(abd, VDEV_PAD_SIZE);
nvbuf = bootenv->vbe_bootenv;
nvsize = sizeof (bootenv->vbe_bootenv);
bootenv->vbe_version = fnvlist_lookup_uint64(env, BOOTENV_VERSION);
switch (bootenv->vbe_version) {
case VB_RAW:
if (nvlist_lookup_string(env, GRUB_ENVMAP, &nvbuf) == 0) {
(void) strlcpy(bootenv->vbe_bootenv, nvbuf, nvsize);
}
error = 0;
break;
case VB_NVLIST:
error = nvlist_pack(env, &nvbuf, &nvsize, NV_ENCODE_XDR,
KM_SLEEP);
break;
default:
error = EINVAL;
break;
}
if (error == 0) {
bootenv->vbe_version = htonll(bootenv->vbe_version);
abd_return_buf_copy(abd, bootenv, VDEV_PAD_SIZE);
} else {
abd_free(abd);
return (SET_ERROR(error));
}
retry:
zio = zio_root(spa, NULL, NULL, flags);
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, abd,
offsetof(vdev_label_t, vl_be),
VDEV_PAD_SIZE, NULL, NULL, flags);
}
error = zio_wait(zio);
if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
abd_free(abd);
return (error);
}
/*
* ==========================================================================
* uberblock load/sync
* ==========================================================================
*/
/*
* Consider the following situation: txg is safely synced to disk. We've
* written the first uberblock for txg + 1, and then we lose power. When we
* come back up, we fail to see the uberblock for txg + 1 because, say,
* it was on a mirrored device and the replica to which we wrote txg + 1
* is now offline. If we then make some changes and sync txg + 1, and then
* the missing replica comes back, then for a few seconds we'll have two
* conflicting uberblocks on disk with the same txg. The solution is simple:
* among uberblocks with equal txg, choose the one with the latest timestamp.
*/
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
int cmp = TREE_CMP(ub1->ub_txg, ub2->ub_txg);
if (likely(cmp))
return (cmp);
cmp = TREE_CMP(ub1->ub_timestamp, ub2->ub_timestamp);
if (likely(cmp))
return (cmp);
/*
* If MMP_VALID(ub) && MMP_SEQ_VALID(ub) then the host has an MMP-aware
* ZFS, e.g. OpenZFS >= 0.7.
*
* If one ub has MMP and the other does not, they were written by
* different hosts, which matters for MMP. So we treat no MMP/no SEQ as
* a 0 value.
*
* Since timestamp and txg are the same if we get this far, either is
* acceptable for importing the pool.
*/
unsigned int seq1 = 0;
unsigned int seq2 = 0;
if (MMP_VALID(ub1) && MMP_SEQ_VALID(ub1))
seq1 = MMP_SEQ(ub1);
if (MMP_VALID(ub2) && MMP_SEQ_VALID(ub2))
seq2 = MMP_SEQ(ub2);
return (TREE_CMP(seq1, seq2));
}
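/*
 * Example of the ordering above (hypothetical values): two uberblocks
 * with the same txg are first ranked by ub_timestamp; if those also
 * match, the MMP sequence number (taken as 0 when MMP/SEQ is not valid)
 * breaks the tie, so an MMP-aware writer that recorded a sequence wins
 * over one that did not.
 */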
struct ubl_cbdata {
uberblock_t *ubl_ubbest; /* Best uberblock */
vdev_t *ubl_vd; /* vdev associated with the above */
};
static void
vdev_uberblock_load_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
spa_t *spa = zio->io_spa;
zio_t *rio = zio->io_private;
uberblock_t *ub = abd_to_buf(zio->io_abd);
struct ubl_cbdata *cbp = rio->io_private;
ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));
if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
mutex_enter(&rio->io_lock);
if (ub->ub_txg <= spa->spa_load_max_txg &&
vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
/*
* Keep track of the vdev in which this uberblock
* was found. We will use this information later
* to obtain the config nvlist associated with
* this uberblock.
*/
*cbp->ubl_ubbest = *ub;
cbp->ubl_vd = vd;
}
mutex_exit(&rio->io_lock);
}
abd_free(zio->io_abd);
}
static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
struct ubl_cbdata *cbp)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd) &&
vd->vdev_ops != &vdev_draid_spare_ops) {
for (int l = 0; l < VDEV_LABELS; l++) {
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_load_done, zio, flags);
}
}
}
}
/*
* Reads the 'best' uberblock from disk along with its associated
* configuration. First, we read the uberblock array of each label of each
* vdev, keeping track of the uberblock with the highest txg in each array.
* Then, we read the configuration from the same vdev as the best uberblock.
*/
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
zio_t *zio;
spa_t *spa = rvd->vdev_spa;
struct ubl_cbdata cb;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
ASSERT(ub);
ASSERT(config);
bzero(ub, sizeof (uberblock_t));
*config = NULL;
cb.ubl_ubbest = ub;
cb.ubl_vd = NULL;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
zio = zio_root(spa, NULL, &cb, flags);
vdev_uberblock_load_impl(zio, rvd, flags, &cb);
(void) zio_wait(zio);
/*
* It's possible that the best uberblock was discovered on a label
* that has a configuration which was written in a future txg.
* Search all labels on this vdev to find the configuration that
* matches the txg for our uberblock.
*/
if (cb.ubl_vd != NULL) {
vdev_dbgmsg(cb.ubl_vd, "best uberblock found for spa %s. "
"txg %llu", spa->spa_name, (u_longlong_t)ub->ub_txg);
*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
if (*config == NULL && spa->spa_extreme_rewind) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config. "
"Trying again without txg restrictions.");
*config = vdev_label_read_config(cb.ubl_vd, UINT64_MAX);
}
if (*config == NULL) {
vdev_dbgmsg(cb.ubl_vd, "failed to read label config");
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* For use when a leaf vdev is expanded.
* The location of labels 2 and 3 changed, and at the new location the
* uberblock rings are either empty or contain garbage. The sync will write
* new configs there because the vdev is dirty, but expansion also needs the
* uberblock rings copied. Read them from label 0 which did not move.
*
* Since the point is to populate labels {2,3} with valid uberblocks,
* we zero uberblocks we fail to read or which are not valid.
*/
static void
vdev_copy_uberblocks(vdev_t *vd)
{
abd_t *ub_abd;
zio_t *write_zio;
int locks = (SCL_L2ARC | SCL_ZIO);
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) ==
SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* No uberblocks are stored on distributed spares; they may be
* safely skipped when expanding a leaf vdev.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER);
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
const int src_label = 0;
zio_t *zio;
zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
vdev_label_read(zio, vd, src_label, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
NULL, NULL, flags);
if (zio_wait(zio) || uberblock_verify(abd_to_buf(ub_abd)))
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
for (int l = 2; l < VDEV_LABELS; l++)
vdev_label_write(write_zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd), NULL, NULL,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
(void) zio_wait(write_zio);
spa_config_exit(vd->vdev_spa, locks, FTAG);
abd_free(ub_abd);
}
/*
* On success, increment root zio's count of good writes.
* We only get credit for writes to known-visible vdevs; see spa_vdev_add().
*/
static void
vdev_uberblock_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
atomic_inc_64(good_writes);
}
/*
* Write the uberblock to all labels of all leaves of the specified vdev.
*/
static void
vdev_uberblock_sync(zio_t *zio, uint64_t *good_writes,
uberblock_t *ub, vdev_t *vd, int flags)
{
for (uint64_t c = 0; c < vd->vdev_children; c++) {
vdev_uberblock_sync(zio, good_writes,
ub, vd->vdev_child[c], flags);
}
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/*
* There's no need to write uberblocks to a distributed spare; they
* are already stored on all the leaves of the parent dRAID. For
* this same reason vdev_uberblock_load_impl() skips distributed
* spares when reading uberblocks.
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
/* If the vdev was expanded, need to copy uberblock rings. */
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
vd->vdev_copy_uberblocks == B_TRUE) {
vdev_copy_uberblocks(vd);
vd->vdev_copy_uberblocks = B_FALSE;
}
int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
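/*
 * Note on the slot choice above: with multihost enabled,
 * MMP_BLOCKS_PER_LABEL slots of each uberblock ring are presumably
 * reserved for MMP writes, so the txg-based rotation only cycles
 * through the remaining VDEV_UBERBLOCK_COUNT(vd) - m slots.
 */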
/* Copy the uberblock_t into the ABD */
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
for (int l = 0; l < VDEV_LABELS; l++)
vdev_label_write(zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_sync_done, good_writes,
flags | ZIO_FLAG_DONT_PROPAGATE);
abd_free(ub_abd);
}
/* Sync the uberblocks to all vdevs in svd[] */
static int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
spa_t *spa = svd[0]->vdev_spa;
zio_t *zio;
uint64_t good_writes = 0;
zio = zio_root(spa, NULL, NULL, flags);
for (int v = 0; v < svdcount; v++)
vdev_uberblock_sync(zio, &good_writes, ub, svd[v], flags);
(void) zio_wait(zio);
/*
* Flush the uberblocks to disk. This ensures that the odd labels
* are no longer needed (because the new uberblocks and the even
* labels are safely on disk), so it is safe to overwrite them.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (int v = 0; v < svdcount; v++) {
if (vdev_writeable(svd[v])) {
zio_flush(zio, svd[v]);
}
}
(void) zio_wait(zio);
return (good_writes >= 1 ? 0 : EIO);
}
/*
* On success, increment the count of good writes for our top-level vdev.
*/
static void
vdev_label_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0)
atomic_inc_64(good_writes);
}
/*
* If there weren't enough good writes, indicate failure to the parent.
*/
static void
vdev_label_sync_top_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (*good_writes == 0)
zio->io_error = SET_ERROR(EIO);
kmem_free(good_writes, sizeof (uint64_t));
}
/*
* We ignore errors for log and cache devices, simply free the private data.
*/
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
kmem_free(zio->io_private, sizeof (uint64_t));
}
/*
* Write all even or odd labels to all leaves of the specified vdev.
*/
static void
vdev_label_sync(zio_t *zio, uint64_t *good_writes,
vdev_t *vd, int l, uint64_t txg, int flags)
{
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
char *buf;
size_t buflen;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_label_sync(zio, good_writes,
vd->vdev_child[c], l, txg, flags);
}
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/*
* The top-level config never needs to be written to a distributed
* spare. When read, vdev_dspare_label_read_config() will generate
* the config for vdev_label_read_config().
*/
if (vd->vdev_ops == &vdev_draid_spare_ops)
return;
/*
* Generate a label describing the top-level config to which we belong.
*/
label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
for (; l < VDEV_LABELS; l += 2) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t),
vdev_label_sync_done, good_writes,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
}
abd_free(vp_abd);
nvlist_free(label);
}
static int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
list_t *dl = &spa->spa_config_dirty_list;
vdev_t *vd;
zio_t *zio;
int error;
/*
* Write the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
uint64_t *good_writes;
ASSERT(!vd->vdev_ishole);
good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
zio_t *vio = zio_null(zio, spa, NULL,
(vd->vdev_islog || vd->vdev_aux != NULL) ?
vdev_label_sync_ignore_done : vdev_label_sync_top_done,
good_writes, flags);
vdev_label_sync(vio, good_writes, vd, l, txg, flags);
zio_nowait(vio);
}
error = zio_wait(zio);
/*
* Flush the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
zio_flush(zio, vd);
(void) zio_wait(zio);
return (error);
}
/*
* Sync the uberblock and any changes to the vdev configuration.
*
* The order of operations is carefully crafted to ensure that
* if the system panics or loses power at any time, the state on disk
* is still transactionally consistent. The in-line comments below
* describe the failure semantics at each stage.
*
* Moreover, vdev_config_sync() is designed to be idempotent: if it fails
* at any time, you can just call it again, and it will resume its work.
*/
int
vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
{
spa_t *spa = svd[0]->vdev_spa;
uberblock_t *ub = &spa->spa_uberblock;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
ASSERT(svdcount != 0);
retry:
/*
* Normally, we don't want to try too hard to write every label and
* uberblock. If there is a flaky disk, we don't want the rest of the
* sync process to block while we retry. But if we can't write a
* single label out, we should retry with ZIO_FLAG_TRYHARD before
* bailing out and declaring the pool faulted.
*/
if (error != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0)
return (error);
flags |= ZIO_FLAG_TRYHARD;
}
ASSERT(ub->ub_txg <= txg);
/*
* If this isn't a resync due to I/O errors,
* and nothing changed in this transaction group,
* and the vdev configuration hasn't changed,
* then there's nothing to do.
*/
if (ub->ub_txg < txg) {
boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
txg, spa->spa_mmp.mmp_delay);
if (!changed && list_is_empty(&spa->spa_config_dirty_list))
return (0);
}
if (txg > spa_freeze_txg(spa))
return (0);
ASSERT(txg <= spa->spa_final_txg);
/*
* Flush the write cache of every disk that's been written to
* in this transaction group. This ensures that all blocks
* written in this txg will be committed to stable storage
* before any uberblock that references them.
*/
zio_t *zio = zio_root(spa, NULL, NULL, flags);
for (vdev_t *vd =
txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd != NULL;
vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
zio_flush(zio, vd);
(void) zio_wait(zio);
/*
* Sync out the even labels (L0, L2) for every dirty vdev. If the
* system dies in the middle of this process, that's OK: all of the
* even labels that made it to disk will be newer than any uberblock,
* and will therefore be considered invalid. The odd labels (L1, L3),
* which have not yet been touched, will still be valid. We flush
* the new labels to disk to ensure that all even-label updates
* are committed to stable storage before the uberblock update.
*/
if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_label_sync_list() returned error %d "
"for pool '%s' when syncing out the even labels "
"of dirty vdevs", error, spa_name(spa));
}
goto retry;
}
/*
* Sync the uberblocks to all vdevs in svd[].
* If the system dies in the middle of this step, there are two cases
* to consider, and the on-disk state is consistent either way:
*
* (1) If none of the new uberblocks made it to disk, then the
* previous uberblock will be the newest, and the odd labels
* (which had not yet been touched) will be valid with respect
* to that uberblock.
*
* (2) If one or more new uberblocks made it to disk, then they
* will be the newest, and the even labels (which had all
* been successfully committed) will be valid with respect
* to the new uberblocks.
*/
if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_uberblock_sync_list() returned error "
"%d for pool '%s'", error, spa_name(spa));
}
goto retry;
}
if (spa_multihost(spa))
mmp_update_uberblock(spa, ub);
/*
* Sync out odd labels for every dirty vdev. If the system dies
* in the middle of this process, the even labels and the new
* uberblocks will suffice to open the pool. The next time
* the pool is opened, the first thing we'll do -- before any
* user data is modified -- is mark every vdev dirty so that
* all labels will be brought up to date. We flush the new labels
* to disk to ensure that all odd-label updates are committed to
* stable storage before the next transaction group begins.
*/
if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0) {
zfs_dbgmsg("vdev_label_sync_list() returned error %d "
"for pool '%s' when syncing out the odd labels of "
"dirty vdevs", error, spa_name(spa));
}
goto retry;
}
return (0);
}
diff --git a/sys/contrib/openzfs/module/zfs/vdev_raidz_math_scalar.c b/sys/contrib/openzfs/module/zfs/vdev_raidz_math_scalar.c
index cd742e146ca6..9e9c15ff4ba2 100644
--- a/sys/contrib/openzfs/module/zfs/vdev_raidz_math_scalar.c
+++ b/sys/contrib/openzfs/module/zfs/vdev_raidz_math_scalar.c
@@ -1,337 +1,337 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/vdev_raidz_impl.h>
/*
* Provide native CPU scalar routines.
* Support 32bit and 64bit CPUs.
*/
#if ((~(0x0ULL)) >> 24) == 0xffULL
#define ELEM_SIZE 4
typedef uint32_t iv_t;
#elif ((~(0x0ULL)) >> 56) == 0xffULL
#define ELEM_SIZE 8
typedef uint64_t iv_t;
#endif
/*
* Vector type used in scalar implementation
*
* The union is expected to be of native CPU register size. Since addition
* uses the XOR operation, it can be performed on all byte elements at once.
* Multiplication requires per byte access.
*/
typedef union {
iv_t e;
uint8_t b[ELEM_SIZE];
} v_t;
/*
* Precomputed lookup tables for multiplication by a constant
*
* The reconstruction path requires multiplication by constant factors. Instead of
* performing a two-step lookup (log & exp tables), a direct lookup can be used
* instead. Multiplication of element 'a' by a constant 'c' is obtained as:
*
* r = vdev_raidz_mul_lt[c_log][a];
*
* where c_log = vdev_raidz_log2[c]. Logs of the coefficient factors are used
* because they are faster to obtain while solving the syndrome equations.
*
* PERFORMANCE NOTE:
* Even though the complete lookup table uses 64 KiB, only a relatively small
* portion of it is used at any one time. The following shows the number of
* accessed bytes for different cases:
* - 1 failed disk: 256B (1 mul. coefficient)
* - 2 failed disks: 512B (2 mul. coefficients)
* - 3 failed disks: 1536B (6 mul. coefficients)
*
* The size of the actually accessed lookup table region is only larger for
* reconstruction of 3 failed disks, when compared to the traditional log/exp
* method. But since the result is obtained in one lookup step, performance is
* doubled.
*/
static uint8_t vdev_raidz_mul_lt[256][256] __attribute__((aligned(256)));
static void
raidz_init_scalar(void)
{
int c, i;
for (c = 0; c < 256; c++)
for (i = 0; i < 256; i++)
vdev_raidz_mul_lt[c][i] = gf_mul(c, i);
}
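/*
 * Illustrative sketch (hypothetical helper, not used by the code below):
 * the table filled in by raidz_init_scalar() memoizes gf_mul(), so
 * multiplying a byte by a constant becomes a single lookup instead of
 * the two-step log/exp walk mentioned above. The log/exp identity is
 * assumed from the standard GF(2^8) tables defined at the end of this
 * file.
 */
static inline uint8_t
raidz_mul_sketch(uint8_t c, uint8_t a)
{
	/* Direct method: one lookup into the precomputed 64 KiB table. */
	uint8_t direct = vdev_raidz_mul_lt[c][a];

	/* Traditional two-step method (log & exp tables), zero-aware. */
	uint8_t logexp = (c == 0 || a == 0) ? 0 :
	    vdev_raidz_pow2[(vdev_raidz_log2[c] + vdev_raidz_log2[a]) % 255];

	ASSERT3U(direct, ==, logexp);
	return (direct);
}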
#define PREFETCHNTA(ptr, offset) {}
#define PREFETCH(ptr, offset) {}
#define XOR_ACC(src, acc) acc.e ^= ((v_t *)src)[0].e
#define XOR(src, acc) acc.e ^= src.e
#define ZERO(acc) acc.e = 0
#define COPY(src, dst) dst = src
#define LOAD(src, val) val = ((v_t *)src)[0]
#define STORE(dst, val) ((v_t *)dst)[0] = val
/*
* Constants used for optimized multiplication by 2.
*/
static const struct {
iv_t mod;
iv_t mask;
iv_t msb;
} scalar_mul2_consts = {
#if ELEM_SIZE == 8
.mod = 0x1d1d1d1d1d1d1d1dULL,
.mask = 0xfefefefefefefefeULL,
.msb = 0x8080808080808080ULL,
#else
.mod = 0x1d1d1d1dULL,
.mask = 0xfefefefeULL,
.msb = 0x80808080ULL,
#endif
};
#define MUL2_SETUP() {}
#define MUL2(a) \
{ \
iv_t _mask; \
\
_mask = (a).e & scalar_mul2_consts.msb; \
_mask = (_mask << 1) - (_mask >> 7); \
(a).e = ((a).e << 1) & scalar_mul2_consts.mask; \
(a).e = (a).e ^ (_mask & scalar_mul2_consts.mod); \
}
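/*
 * Per-byte view of MUL2() above (hypothetical helper): multiplication by
 * 2 in the RAID-Z GF(2^8) field, i.e. shift left and, when the top bit
 * was set, xor in the 0x1d reduction constant. MUL2() applies exactly
 * this to every byte lane of the packed register at once using the
 * msb/mask/mod constants defined above.
 */
static inline uint8_t
gf_mul2_byte_sketch(uint8_t a)
{
	return ((uint8_t)(((a << 1) & 0xfe) ^ ((a & 0x80) ? 0x1d : 0)));
}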
#define MUL4(a) \
{ \
MUL2(a); \
MUL2(a); \
}
#define MUL(c, a) \
{ \
const uint8_t *mul_lt = vdev_raidz_mul_lt[c]; \
switch (ELEM_SIZE) { \
case 8: \
a.b[7] = mul_lt[a.b[7]]; \
a.b[6] = mul_lt[a.b[6]]; \
a.b[5] = mul_lt[a.b[5]]; \
a.b[4] = mul_lt[a.b[4]]; \
- /* falls through */ \
+ fallthrough; \
case 4: \
a.b[3] = mul_lt[a.b[3]]; \
a.b[2] = mul_lt[a.b[2]]; \
a.b[1] = mul_lt[a.b[1]]; \
a.b[0] = mul_lt[a.b[0]]; \
break; \
} \
}
#define raidz_math_begin() {}
#define raidz_math_end() {}
#define SYN_STRIDE 1
#define ZERO_DEFINE() v_t d0
#define ZERO_STRIDE 1
#define ZERO_D d0
#define COPY_DEFINE() v_t d0
#define COPY_STRIDE 1
#define COPY_D d0
#define ADD_DEFINE() v_t d0
#define ADD_STRIDE 1
#define ADD_D d0
#define MUL_DEFINE() v_t d0
#define MUL_STRIDE 1
#define MUL_D d0
#define GEN_P_STRIDE 1
#define GEN_P_DEFINE() v_t p0
#define GEN_P_P p0
#define GEN_PQ_STRIDE 1
#define GEN_PQ_DEFINE() v_t d0, c0
#define GEN_PQ_D d0
#define GEN_PQ_C c0
#define GEN_PQR_STRIDE 1
#define GEN_PQR_DEFINE() v_t d0, c0
#define GEN_PQR_D d0
#define GEN_PQR_C c0
#define SYN_Q_DEFINE() v_t d0, x0
#define SYN_Q_D d0
#define SYN_Q_X x0
#define SYN_R_DEFINE() v_t d0, x0
#define SYN_R_D d0
#define SYN_R_X x0
#define SYN_PQ_DEFINE() v_t d0, x0
#define SYN_PQ_D d0
#define SYN_PQ_X x0
#define REC_PQ_STRIDE 1
#define REC_PQ_DEFINE() v_t x0, y0, t0
#define REC_PQ_X x0
#define REC_PQ_Y y0
#define REC_PQ_T t0
#define SYN_PR_DEFINE() v_t d0, x0
#define SYN_PR_D d0
#define SYN_PR_X x0
#define REC_PR_STRIDE 1
#define REC_PR_DEFINE() v_t x0, y0, t0
#define REC_PR_X x0
#define REC_PR_Y y0
#define REC_PR_T t0
#define SYN_QR_DEFINE() v_t d0, x0
#define SYN_QR_D d0
#define SYN_QR_X x0
#define REC_QR_STRIDE 1
#define REC_QR_DEFINE() v_t x0, y0, t0
#define REC_QR_X x0
#define REC_QR_Y y0
#define REC_QR_T t0
#define SYN_PQR_DEFINE() v_t d0, x0
#define SYN_PQR_D d0
#define SYN_PQR_X x0
#define REC_PQR_STRIDE 1
#define REC_PQR_DEFINE() v_t x0, y0, z0, xs0, ys0
#define REC_PQR_X x0
#define REC_PQR_Y y0
#define REC_PQR_Z z0
#define REC_PQR_XS xs0
#define REC_PQR_YS ys0
#include "vdev_raidz_math_impl.h"
DEFINE_GEN_METHODS(scalar);
DEFINE_REC_METHODS(scalar);
boolean_t
raidz_will_scalar_work(void)
{
return (B_TRUE); /* always */
}
const raidz_impl_ops_t vdev_raidz_scalar_impl = {
.init = raidz_init_scalar,
.fini = NULL,
.gen = RAIDZ_GEN_METHODS(scalar),
.rec = RAIDZ_REC_METHODS(scalar),
.is_supported = &raidz_will_scalar_work,
.name = "scalar"
};
/* Powers of 2 in the RAID-Z Galois field. */
const uint8_t vdev_raidz_pow2[256] __attribute__((aligned(256))) = {
0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,
0x1d, 0x3a, 0x74, 0xe8, 0xcd, 0x87, 0x13, 0x26,
0x4c, 0x98, 0x2d, 0x5a, 0xb4, 0x75, 0xea, 0xc9,
0x8f, 0x03, 0x06, 0x0c, 0x18, 0x30, 0x60, 0xc0,
0x9d, 0x27, 0x4e, 0x9c, 0x25, 0x4a, 0x94, 0x35,
0x6a, 0xd4, 0xb5, 0x77, 0xee, 0xc1, 0x9f, 0x23,
0x46, 0x8c, 0x05, 0x0a, 0x14, 0x28, 0x50, 0xa0,
0x5d, 0xba, 0x69, 0xd2, 0xb9, 0x6f, 0xde, 0xa1,
0x5f, 0xbe, 0x61, 0xc2, 0x99, 0x2f, 0x5e, 0xbc,
0x65, 0xca, 0x89, 0x0f, 0x1e, 0x3c, 0x78, 0xf0,
0xfd, 0xe7, 0xd3, 0xbb, 0x6b, 0xd6, 0xb1, 0x7f,
0xfe, 0xe1, 0xdf, 0xa3, 0x5b, 0xb6, 0x71, 0xe2,
0xd9, 0xaf, 0x43, 0x86, 0x11, 0x22, 0x44, 0x88,
0x0d, 0x1a, 0x34, 0x68, 0xd0, 0xbd, 0x67, 0xce,
0x81, 0x1f, 0x3e, 0x7c, 0xf8, 0xed, 0xc7, 0x93,
0x3b, 0x76, 0xec, 0xc5, 0x97, 0x33, 0x66, 0xcc,
0x85, 0x17, 0x2e, 0x5c, 0xb8, 0x6d, 0xda, 0xa9,
0x4f, 0x9e, 0x21, 0x42, 0x84, 0x15, 0x2a, 0x54,
0xa8, 0x4d, 0x9a, 0x29, 0x52, 0xa4, 0x55, 0xaa,
0x49, 0x92, 0x39, 0x72, 0xe4, 0xd5, 0xb7, 0x73,
0xe6, 0xd1, 0xbf, 0x63, 0xc6, 0x91, 0x3f, 0x7e,
0xfc, 0xe5, 0xd7, 0xb3, 0x7b, 0xf6, 0xf1, 0xff,
0xe3, 0xdb, 0xab, 0x4b, 0x96, 0x31, 0x62, 0xc4,
0x95, 0x37, 0x6e, 0xdc, 0xa5, 0x57, 0xae, 0x41,
0x82, 0x19, 0x32, 0x64, 0xc8, 0x8d, 0x07, 0x0e,
0x1c, 0x38, 0x70, 0xe0, 0xdd, 0xa7, 0x53, 0xa6,
0x51, 0xa2, 0x59, 0xb2, 0x79, 0xf2, 0xf9, 0xef,
0xc3, 0x9b, 0x2b, 0x56, 0xac, 0x45, 0x8a, 0x09,
0x12, 0x24, 0x48, 0x90, 0x3d, 0x7a, 0xf4, 0xf5,
0xf7, 0xf3, 0xfb, 0xeb, 0xcb, 0x8b, 0x0b, 0x16,
0x2c, 0x58, 0xb0, 0x7d, 0xfa, 0xe9, 0xcf, 0x83,
0x1b, 0x36, 0x6c, 0xd8, 0xad, 0x47, 0x8e, 0x01
};
/* Logs of 2 in the RAID-Z Galois field. */
const uint8_t vdev_raidz_log2[256] __attribute__((aligned(256))) = {
0x00, 0x00, 0x01, 0x19, 0x02, 0x32, 0x1a, 0xc6,
0x03, 0xdf, 0x33, 0xee, 0x1b, 0x68, 0xc7, 0x4b,
0x04, 0x64, 0xe0, 0x0e, 0x34, 0x8d, 0xef, 0x81,
0x1c, 0xc1, 0x69, 0xf8, 0xc8, 0x08, 0x4c, 0x71,
0x05, 0x8a, 0x65, 0x2f, 0xe1, 0x24, 0x0f, 0x21,
0x35, 0x93, 0x8e, 0xda, 0xf0, 0x12, 0x82, 0x45,
0x1d, 0xb5, 0xc2, 0x7d, 0x6a, 0x27, 0xf9, 0xb9,
0xc9, 0x9a, 0x09, 0x78, 0x4d, 0xe4, 0x72, 0xa6,
0x06, 0xbf, 0x8b, 0x62, 0x66, 0xdd, 0x30, 0xfd,
0xe2, 0x98, 0x25, 0xb3, 0x10, 0x91, 0x22, 0x88,
0x36, 0xd0, 0x94, 0xce, 0x8f, 0x96, 0xdb, 0xbd,
0xf1, 0xd2, 0x13, 0x5c, 0x83, 0x38, 0x46, 0x40,
0x1e, 0x42, 0xb6, 0xa3, 0xc3, 0x48, 0x7e, 0x6e,
0x6b, 0x3a, 0x28, 0x54, 0xfa, 0x85, 0xba, 0x3d,
0xca, 0x5e, 0x9b, 0x9f, 0x0a, 0x15, 0x79, 0x2b,
0x4e, 0xd4, 0xe5, 0xac, 0x73, 0xf3, 0xa7, 0x57,
0x07, 0x70, 0xc0, 0xf7, 0x8c, 0x80, 0x63, 0x0d,
0x67, 0x4a, 0xde, 0xed, 0x31, 0xc5, 0xfe, 0x18,
0xe3, 0xa5, 0x99, 0x77, 0x26, 0xb8, 0xb4, 0x7c,
0x11, 0x44, 0x92, 0xd9, 0x23, 0x20, 0x89, 0x2e,
0x37, 0x3f, 0xd1, 0x5b, 0x95, 0xbc, 0xcf, 0xcd,
0x90, 0x87, 0x97, 0xb2, 0xdc, 0xfc, 0xbe, 0x61,
0xf2, 0x56, 0xd3, 0xab, 0x14, 0x2a, 0x5d, 0x9e,
0x84, 0x3c, 0x39, 0x53, 0x47, 0x6d, 0x41, 0xa2,
0x1f, 0x2d, 0x43, 0xd8, 0xb7, 0x7b, 0xa4, 0x76,
0xc4, 0x17, 0x49, 0xec, 0x7f, 0x0c, 0x6f, 0xf6,
0x6c, 0xa1, 0x3b, 0x52, 0x29, 0x9d, 0x55, 0xaa,
0xfb, 0x60, 0x86, 0xb1, 0xbb, 0xcc, 0x3e, 0x5a,
0xcb, 0x59, 0x5f, 0xb0, 0x9c, 0xa9, 0xa0, 0x51,
0x0b, 0xf5, 0x16, 0xeb, 0x7a, 0x75, 0x2c, 0xd7,
0x4f, 0xae, 0xd5, 0xe9, 0xe6, 0xe7, 0xad, 0xe8,
0x74, 0xd6, 0xf4, 0xea, 0xa8, 0x50, 0x58, 0xaf,
};
diff --git a/sys/contrib/openzfs/module/zfs/zfs_fm.c b/sys/contrib/openzfs/module/zfs/zfs_fm.c
index 60e631567a89..007f31b4e7b3 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_fm.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_fm.c
@@ -1,1458 +1,1510 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012,2021 by Delphix. All rights reserved.
*/
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/sysevent.h>
/*
* This general routine is responsible for generating all the different ZFS
* ereports. The payload is dependent on the class, and which arguments are
* supplied to the function:
*
* EREPORT POOL VDEV IO
* block X X X
* data X X
* device X X
* pool X
*
* If we are in a loading state, all errors are chained together by the same
* SPA-wide ENA (Error Numeric Association).
*
* For isolated I/O requests, we get the ENA from the zio_t. The propagation
* gets very complicated due to RAID-Z, gang blocks, and vdev caching. We want
* to chain together all ereports associated with a logical piece of data. For
* read I/Os, there are basically three 'types' of I/O, which form a roughly
* layered diagram:
*
* +---------------+
* | Aggregate I/O | No associated logical data or device
* +---------------+
* |
* V
* +---------------+ Reads associated with a piece of logical data.
* | Read I/O | This includes reads on behalf of RAID-Z,
* +---------------+ mirrors, gang blocks, retries, etc.
* |
* V
* +---------------+ Reads associated with a particular device, but
* | Physical I/O | no logical data. Issued as part of vdev caching
* +---------------+ and I/O aggregation.
*
* Note that 'physical I/O' here is not the same terminology as used in the rest
* of ZIO. Typically, 'physical I/O' simply means that there is no attached
* blockpointer. But I/O with no associated block pointer can still be related
* to a logical piece of data (i.e. RAID-Z requests).
*
* Purely physical I/Os always have unique ENAs. They are not related to a
* particular piece of logical data, and therefore cannot be chained together.
* We still generate an ereport, but the DE doesn't correlate it with any
* logical piece of data. When such an I/O fails, the delegated I/O requests
* will issue a retry, which will trigger the 'real' ereport with the correct
* ENA.
*
* We keep track of the ENA for a ZIO chain through the 'io_logical' member.
* When a new logical I/O is issued, we set this to point to itself. Child I/Os
* then inherit this pointer, so that once it is first set, subsequent failures
* will use the same ENA. For vdev cache fill and queue aggregation I/O,
* this pointer is set to NULL, and no ereport will be generated (since it
* doesn't actually correspond to any particular device or piece of data,
* and the caller will always retry without caching or queueing anyway).
*
* For checksum errors, we want to include more information about the actual
* error which occurs. Accordingly, we build an ereport when the error is
* noticed, but instead of sending it in immediately, we hang it off of the
* io_cksum_report field of the logical IO. When the logical IO completes
* (successfully or not), zfs_ereport_finish_checksum() is called with the
* good and bad versions of the buffer (if available), and we annotate the
* ereport with information about the differences.
*/
#ifdef _KERNEL
/*
* Duplicate ereport Detection
*
* Some ereports are retained momentarily for detecting duplicates. These
* are kept in a recent_events_node_t in both a time-ordered list and an AVL
* tree of recent unique ereports.
*
* The lifespan of these recent ereports is bounded (15 mins) and a cleaner
* task is used to purge stale entries.
*/
static list_t recent_events_list;
static avl_tree_t recent_events_tree;
static kmutex_t recent_events_lock;
static taskqid_t recent_events_cleaner_tqid;
/*
* Each node is about 128 bytes so 2,000 would consume 1/4 MiB.
*
* This setting can be changed dynamically and setting it to zero
* disables duplicate detection.
*/
unsigned int zfs_zevent_retain_max = 2000;
/*
* The lifespan for a recent ereport entry. The default of 15 minutes is
* intended to outlive the zfs diagnosis engine's threshold of 10 errors
* over a period of 10 minutes.
*/
unsigned int zfs_zevent_retain_expire_secs = 900;
typedef enum zfs_subclass {
ZSC_IO,
ZSC_DATA,
ZSC_CHECKSUM
} zfs_subclass_t;
typedef struct {
/* common criteria */
uint64_t re_pool_guid;
uint64_t re_vdev_guid;
int re_io_error;
uint64_t re_io_size;
uint64_t re_io_offset;
zfs_subclass_t re_subclass;
zio_priority_t re_io_priority;
/* logical zio criteria (optional) */
zbookmark_phys_t re_io_bookmark;
/* internal state */
avl_node_t re_tree_link;
list_node_t re_list_link;
uint64_t re_timestamp;
} recent_events_node_t;
static int
recent_events_compare(const void *a, const void *b)
{
const recent_events_node_t *node1 = a;
const recent_events_node_t *node2 = b;
int cmp;
/*
* The comparison order here is somewhat arbitrary.
* What's important is that if all criteria match, then it
* is a duplicate (i.e., compare returns 0).
*/
if ((cmp = TREE_CMP(node1->re_subclass, node2->re_subclass)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_pool_guid, node2->re_pool_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_vdev_guid, node2->re_vdev_guid)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_error, node2->re_io_error)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_priority, node2->re_io_priority)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_size, node2->re_io_size)) != 0)
return (cmp);
if ((cmp = TREE_CMP(node1->re_io_offset, node2->re_io_offset)) != 0)
return (cmp);
const zbookmark_phys_t *zb1 = &node1->re_io_bookmark;
const zbookmark_phys_t *zb2 = &node2->re_io_bookmark;
if ((cmp = TREE_CMP(zb1->zb_objset, zb2->zb_objset)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_object, zb2->zb_object)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_level, zb2->zb_level)) != 0)
return (cmp);
if ((cmp = TREE_CMP(zb1->zb_blkid, zb2->zb_blkid)) != 0)
return (cmp);
return (0);
}
static void zfs_ereport_schedule_cleaner(void);
/*
* background task to clean stale recent event nodes.
*/
/*ARGSUSED*/
static void
zfs_ereport_cleaner(void *arg)
{
recent_events_node_t *entry;
uint64_t now = gethrtime();
/*
* purge expired entries
*/
mutex_enter(&recent_events_lock);
while ((entry = list_tail(&recent_events_list)) != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
if (age <= zfs_zevent_retain_expire_secs)
break;
/* remove expired node */
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
/* Restart the cleaner if more entries remain */
recent_events_cleaner_tqid = 0;
if (!list_is_empty(&recent_events_list))
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
}
static void
zfs_ereport_schedule_cleaner(void)
{
ASSERT(MUTEX_HELD(&recent_events_lock));
uint64_t timeout = SEC2NSEC(zfs_zevent_retain_expire_secs + 1);
recent_events_cleaner_tqid = taskq_dispatch_delay(
system_delay_taskq, zfs_ereport_cleaner, NULL, TQ_SLEEP,
ddi_get_lbolt() + NSEC_TO_TICK(timeout));
}
/*
* Clear entries for a given vdev or all vdevs in a pool when vdev == NULL
*/
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
uint64_t vdev_guid, pool_guid;
int cnt = 0;
ASSERT(vd != NULL || spa != NULL);
if (vd == NULL) {
vdev_guid = 0;
pool_guid = spa_guid(spa);
} else {
vdev_guid = vd->vdev_guid;
pool_guid = 0;
}
mutex_enter(&recent_events_lock);
recent_events_node_t *next = list_head(&recent_events_list);
while (next != NULL) {
recent_events_node_t *entry = next;
next = list_next(&recent_events_list, next);
if (entry->re_vdev_guid == vdev_guid ||
entry->re_pool_guid == pool_guid) {
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
cnt++;
}
}
mutex_exit(&recent_events_lock);
}
/*
* Check if an ereport would be a duplicate of one recently posted.
*
* An ereport is considered a duplicate if the set of criteria in
* recent_events_node_t all match.
*
* Only FM_EREPORT_ZFS_IO, FM_EREPORT_ZFS_DATA, and FM_EREPORT_ZFS_CHECKSUM
* are candidates for duplicate checking.
*/
static boolean_t
zfs_ereport_is_duplicate(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t offset, uint64_t size)
{
recent_events_node_t search = {0}, *entry;
if (vd == NULL || zio == NULL)
return (B_FALSE);
if (zfs_zevent_retain_max == 0)
return (B_FALSE);
if (strcmp(subclass, FM_EREPORT_ZFS_IO) == 0)
search.re_subclass = ZSC_IO;
else if (strcmp(subclass, FM_EREPORT_ZFS_DATA) == 0)
search.re_subclass = ZSC_DATA;
else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0)
search.re_subclass = ZSC_CHECKSUM;
else
return (B_FALSE);
search.re_pool_guid = spa_guid(spa);
search.re_vdev_guid = vd->vdev_guid;
search.re_io_error = zio->io_error;
search.re_io_priority = zio->io_priority;
/* if size is supplied use it over what's in zio */
if (size) {
search.re_io_size = size;
search.re_io_offset = offset;
} else {
search.re_io_size = zio->io_size;
search.re_io_offset = zio->io_offset;
}
/* grab optional logical zio criteria */
if (zb != NULL) {
search.re_io_bookmark.zb_objset = zb->zb_objset;
search.re_io_bookmark.zb_object = zb->zb_object;
search.re_io_bookmark.zb_level = zb->zb_level;
search.re_io_bookmark.zb_blkid = zb->zb_blkid;
}
uint64_t now = gethrtime();
mutex_enter(&recent_events_lock);
/* check if we have seen this one recently */
entry = avl_find(&recent_events_tree, &search, NULL);
if (entry != NULL) {
uint64_t age = NSEC2SEC(now - entry->re_timestamp);
/*
* There is still an active cleaner (since we're here).
* Reset the last seen time for this duplicate entry
* so that its lifespan gets extended.
*/
list_remove(&recent_events_list, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
zfs_zevent_track_duplicate();
mutex_exit(&recent_events_lock);
return (age <= zfs_zevent_retain_expire_secs);
}
if (avl_numnodes(&recent_events_tree) >= zfs_zevent_retain_max) {
/* recycle oldest node */
entry = list_tail(&recent_events_list);
ASSERT(entry != NULL);
list_remove(&recent_events_list, entry);
avl_remove(&recent_events_tree, entry);
} else {
entry = kmem_alloc(sizeof (recent_events_node_t), KM_SLEEP);
}
/* record this as a recent ereport */
*entry = search;
avl_add(&recent_events_tree, entry);
list_insert_head(&recent_events_list, entry);
entry->re_timestamp = now;
/* Start a cleaner if not already scheduled */
if (recent_events_cleaner_tqid == 0)
zfs_ereport_schedule_cleaner();
mutex_exit(&recent_events_lock);
return (B_FALSE);
}
void
zfs_zevent_post_cb(nvlist_t *nvl, nvlist_t *detector)
{
if (nvl)
fm_nvlist_destroy(nvl, FM_NVA_FREE);
if (detector)
fm_nvlist_destroy(detector, FM_NVA_FREE);
}
/*
* We want to rate limit ZIO delay, deadman, and checksum events so as to not
* flood zevent consumers when a disk is acting up.
*
* Returns 1 if we're ratelimiting, 0 if not.
*/
static int
zfs_is_ratelimiting_event(const char *subclass, vdev_t *vd)
{
int rc = 0;
/*
* zfs_ratelimit() returns 1 if we're *not* ratelimiting and 0 if we
* are. Invert it to get our return value.
*/
if (strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) {
rc = !zfs_ratelimit(&vd->vdev_delay_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_DEADMAN) == 0) {
rc = !zfs_ratelimit(&vd->vdev_deadman_rl);
} else if (strcmp(subclass, FM_EREPORT_ZFS_CHECKSUM) == 0) {
rc = !zfs_ratelimit(&vd->vdev_checksum_rl);
}
if (rc) {
/* We're rate limiting */
fm_erpt_dropped_increment();
}
return (rc);
}
/*
* Return B_TRUE if the event actually posted, B_FALSE if not.
*/
static boolean_t
zfs_ereport_start(nvlist_t **ereport_out, nvlist_t **detector_out,
const char *subclass, spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
zio_t *zio, uint64_t stateoroffset, uint64_t size)
{
nvlist_t *ereport, *detector;
uint64_t ena;
char class[64];
if ((ereport = fm_nvlist_create(NULL)) == NULL)
return (B_FALSE);
if ((detector = fm_nvlist_create(NULL)) == NULL) {
fm_nvlist_destroy(ereport, FM_NVA_FREE);
return (B_FALSE);
}
/*
* Serialize ereport generation
*/
mutex_enter(&spa->spa_errlist_lock);
/*
* Determine the ENA to use for this event. If we are in a loading
* state, use a SPA-wide ENA. Otherwise, if we are in an I/O state, use
* a root zio-wide ENA. Otherwise, simply use a unique ENA.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE) {
if (spa->spa_ena == 0)
spa->spa_ena = fm_ena_generate(0, FM_ENA_FMT1);
ena = spa->spa_ena;
} else if (zio != NULL && zio->io_logical != NULL) {
if (zio->io_logical->io_ena == 0)
zio->io_logical->io_ena =
fm_ena_generate(0, FM_ENA_FMT1);
ena = zio->io_logical->io_ena;
} else {
ena = fm_ena_generate(0, FM_ENA_FMT1);
}
/*
* Construct the full class, detector, and other standard FMA fields.
*/
(void) snprintf(class, sizeof (class), "%s.%s",
ZFS_ERROR_CLASS, subclass);
fm_fmri_zfs_set(detector, FM_ZFS_SCHEME_VERSION, spa_guid(spa),
vd != NULL ? vd->vdev_guid : 0);
fm_ereport_set(ereport, FM_EREPORT_VERSION, class, ena, detector, NULL);
/*
* Construct the per-ereport payload, depending on which parameters are
* passed in.
*/
/*
* Generic payload members common to all ereports.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_POOL, DATA_TYPE_STRING, spa_name(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, DATA_TYPE_UINT64, spa_guid(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, DATA_TYPE_UINT64,
(uint64_t)spa_state(spa),
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, DATA_TYPE_INT32,
(int32_t)spa_load_state(spa), NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_POOL_FAILMODE,
DATA_TYPE_STRING,
spa_get_failmode(spa) == ZIO_FAILURE_MODE_WAIT ?
FM_EREPORT_FAILMODE_WAIT :
spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE ?
FM_EREPORT_FAILMODE_CONTINUE : FM_EREPORT_FAILMODE_PANIC,
NULL);
if (vd != NULL) {
vdev_t *pvd = vd->vdev_parent;
vdev_queue_t *vq = &vd->vdev_queue;
vdev_stat_t *vs = &vd->vdev_stat;
vdev_t *spare_vd;
uint64_t *spare_guids;
char **spare_paths;
int i, spare_count;
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID,
DATA_TYPE_UINT64, vd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_VDEV_TYPE,
DATA_TYPE_STRING, vd->vdev_ops->vdev_op_type, NULL);
if (vd->vdev_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH,
DATA_TYPE_STRING, vd->vdev_path, NULL);
if (vd->vdev_devid != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID,
DATA_TYPE_STRING, vd->vdev_devid, NULL);
if (vd->vdev_fru != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU,
DATA_TYPE_STRING, vd->vdev_fru, NULL);
if (vd->vdev_enc_sysfs_path != NULL)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
DATA_TYPE_STRING, vd->vdev_enc_sysfs_path, NULL);
if (vd->vdev_ashift)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ASHIFT,
DATA_TYPE_UINT64, vd->vdev_ashift, NULL);
if (vq != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_COMP_TS,
DATA_TYPE_UINT64, vq->vq_io_complete_ts, NULL);
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELTA_TS,
DATA_TYPE_UINT64, vq->vq_io_delta_ts, NULL);
}
if (vs != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_VDEV_READ_ERRORS,
DATA_TYPE_UINT64, vs->vs_read_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_WRITE_ERRORS,
DATA_TYPE_UINT64, vs->vs_write_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_CKSUM_ERRORS,
DATA_TYPE_UINT64, vs->vs_checksum_errors,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DELAYS,
DATA_TYPE_UINT64, vs->vs_slow_ios,
NULL);
}
if (pvd != NULL) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_GUID,
DATA_TYPE_UINT64, pvd->vdev_guid,
FM_EREPORT_PAYLOAD_ZFS_PARENT_TYPE,
DATA_TYPE_STRING, pvd->vdev_ops->vdev_op_type,
NULL);
if (pvd->vdev_path)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_PATH,
DATA_TYPE_STRING, pvd->vdev_path, NULL);
if (pvd->vdev_devid)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PARENT_DEVID,
DATA_TYPE_STRING, pvd->vdev_devid, NULL);
}
spare_count = spa->spa_spares.sav_count;
spare_paths = kmem_zalloc(sizeof (char *) * spare_count,
KM_SLEEP);
spare_guids = kmem_zalloc(sizeof (uint64_t) * spare_count,
KM_SLEEP);
for (i = 0; i < spare_count; i++) {
spare_vd = spa->spa_spares.sav_vdevs[i];
if (spare_vd) {
spare_paths[i] = spare_vd->vdev_path;
spare_guids[i] = spare_vd->vdev_guid;
}
}
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_PATHS,
DATA_TYPE_STRING_ARRAY, spare_count, spare_paths,
FM_EREPORT_PAYLOAD_ZFS_VDEV_SPARE_GUIDS,
DATA_TYPE_UINT64_ARRAY, spare_count, spare_guids, NULL);
kmem_free(spare_guids, sizeof (uint64_t) * spare_count);
kmem_free(spare_paths, sizeof (char *) * spare_count);
}
if (zio != NULL) {
/*
* Payload common to all I/Os.
*/
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_ERR,
DATA_TYPE_INT32, zio->io_error, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_FLAGS,
DATA_TYPE_INT32, zio->io_flags, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_STAGE,
DATA_TYPE_UINT32, zio->io_stage, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PIPELINE,
DATA_TYPE_UINT32, zio->io_pipeline, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELAY,
DATA_TYPE_UINT64, zio->io_delay, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_TIMESTAMP,
DATA_TYPE_UINT64, zio->io_timestamp, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_DELTA,
DATA_TYPE_UINT64, zio->io_delta, NULL);
fm_payload_set(ereport, FM_EREPORT_PAYLOAD_ZFS_ZIO_PRIORITY,
DATA_TYPE_UINT32, zio->io_priority, NULL);
/*
* If the 'size' parameter is non-zero, it indicates this is a
* RAID-Z or other I/O where the physical offset and length are
* provided for us, instead of within the zio_t.
*/
if (vd != NULL) {
if (size)
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, stateoroffset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, size, NULL);
else
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OFFSET,
DATA_TYPE_UINT64, zio->io_offset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_SIZE,
DATA_TYPE_UINT64, zio->io_size, NULL);
}
} else if (vd != NULL) {
/*
* If we have a vdev but no zio, this is a device fault, and the
* 'stateoroffset' parameter indicates the previous state of the
* vdev.
*/
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_PREV_STATE,
DATA_TYPE_UINT64, stateoroffset, NULL);
}
/*
* Payload for I/Os with corresponding logical information.
*/
if (zb != NULL && (zio == NULL || zio->io_logical != NULL)) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJSET,
DATA_TYPE_UINT64, zb->zb_objset,
FM_EREPORT_PAYLOAD_ZFS_ZIO_OBJECT,
DATA_TYPE_UINT64, zb->zb_object,
FM_EREPORT_PAYLOAD_ZFS_ZIO_LEVEL,
DATA_TYPE_INT64, zb->zb_level,
FM_EREPORT_PAYLOAD_ZFS_ZIO_BLKID,
DATA_TYPE_UINT64, zb->zb_blkid, NULL);
}
mutex_exit(&spa->spa_errlist_lock);
*ereport_out = ereport;
*detector_out = detector;
return (B_TRUE);
}
/* if it's <= 128 bytes, save the corruption directly */
#define ZFM_MAX_INLINE (128 / sizeof (uint64_t))
#define MAX_RANGES 16
typedef struct zfs_ecksum_info {
/* histograms of set and cleared bits by bit number in a 64-bit word */
uint32_t zei_histogram_set[sizeof (uint64_t) * NBBY];
uint32_t zei_histogram_cleared[sizeof (uint64_t) * NBBY];
/* inline arrays of bits set and cleared. */
uint64_t zei_bits_set[ZFM_MAX_INLINE];
uint64_t zei_bits_cleared[ZFM_MAX_INLINE];
/*
* for each range, the number of bits set and cleared. The Hamming
* distance between the good and bad buffers is the sum of them all.
*/
uint32_t zei_range_sets[MAX_RANGES];
uint32_t zei_range_clears[MAX_RANGES];
struct zei_ranges {
uint32_t zr_start;
uint32_t zr_end;
} zei_ranges[MAX_RANGES];
size_t zei_range_count;
uint32_t zei_mingap;
uint32_t zei_allowed_mingap;
} zfs_ecksum_info_t;
static void
update_histogram(uint64_t value_arg, uint32_t *hist, uint32_t *count)
{
size_t i;
size_t bits = 0;
uint64_t value = BE_64(value_arg);
/* We store the bits in big-endian (largest-first) order */
for (i = 0; i < 64; i++) {
if (value & (1ull << i)) {
hist[63 - i]++;
++bits;
}
}
/* update the count of bits changed */
*count += bits;
}
/*
* We've now filled up the range array, and need to increase "mingap" and
* shrink the range list accordingly. zei_mingap is always the smallest
* distance between array entries, so we set the new_allowed_gap to be
* one greater than that. We then go through the list, joining together
* any ranges which are closer than the new_allowed_gap.
*
* By construction, there will be at least one such join, so the list is
* guaranteed to shrink. We also update zei_mingap to the new smallest gap,
* to prepare for our next invocation.
*/
static void
zei_shrink_ranges(zfs_ecksum_info_t *eip)
{
uint32_t mingap = UINT32_MAX;
uint32_t new_allowed_gap = eip->zei_mingap + 1;
size_t idx, output;
size_t max = eip->zei_range_count;
struct zei_ranges *r = eip->zei_ranges;
ASSERT3U(eip->zei_range_count, >, 0);
ASSERT3U(eip->zei_range_count, <=, MAX_RANGES);
output = idx = 0;
while (idx < max - 1) {
uint32_t start = r[idx].zr_start;
uint32_t end = r[idx].zr_end;
while (idx < max - 1) {
idx++;
uint32_t nstart = r[idx].zr_start;
uint32_t nend = r[idx].zr_end;
uint32_t gap = nstart - end;
if (gap < new_allowed_gap) {
end = nend;
continue;
}
if (gap < mingap)
mingap = gap;
break;
}
r[output].zr_start = start;
r[output].zr_end = end;
output++;
}
ASSERT3U(output, <, eip->zei_range_count);
eip->zei_range_count = output;
eip->zei_mingap = mingap;
eip->zei_allowed_mingap = new_allowed_gap;
}
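/*
 * Worked example of the shrinking above (hypothetical values): with
 * ranges [0,8) [10,16) [18,24) and zei_mingap == 2, new_allowed_gap
 * becomes 3; both gaps equal 2 and are below it, so all three ranges
 * are joined into the single range [0,24).
 */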
static void
zei_add_range(zfs_ecksum_info_t *eip, int start, int end)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
if (count >= MAX_RANGES) {
zei_shrink_ranges(eip);
count = eip->zei_range_count;
}
if (count == 0) {
eip->zei_mingap = UINT32_MAX;
eip->zei_allowed_mingap = 1;
} else {
int gap = start - r[count - 1].zr_end;
if (gap < eip->zei_allowed_mingap) {
r[count - 1].zr_end = end;
return;
}
if (gap < eip->zei_mingap)
eip->zei_mingap = gap;
}
r[count].zr_start = start;
r[count].zr_end = end;
eip->zei_range_count++;
}
static size_t
zei_range_total_size(zfs_ecksum_info_t *eip)
{
struct zei_ranges *r = eip->zei_ranges;
size_t count = eip->zei_range_count;
size_t result = 0;
size_t idx;
for (idx = 0; idx < count; idx++)
result += (r[idx].zr_end - r[idx].zr_start);
return (result);
}
static zfs_ecksum_info_t *
annotate_ecksum(nvlist_t *ereport, zio_bad_cksum_t *info,
const abd_t *goodabd, const abd_t *badabd, size_t size,
boolean_t drop_if_identical)
{
const uint64_t *good;
const uint64_t *bad;
uint64_t allset = 0;
uint64_t allcleared = 0;
size_t nui64s = size / sizeof (uint64_t);
size_t inline_size;
int no_inline = 0;
size_t idx;
size_t range;
size_t offset = 0;
ssize_t start = -1;
zfs_ecksum_info_t *eip = kmem_zalloc(sizeof (*eip), KM_SLEEP);
/* don't do any annotation for injected checksum errors */
if (info != NULL && info->zbc_injected)
return (eip);
if (info != NULL && info->zbc_has_cksum) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_EXPECTED,
DATA_TYPE_UINT64_ARRAY,
sizeof (info->zbc_expected) / sizeof (uint64_t),
(uint64_t *)&info->zbc_expected,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_ACTUAL,
DATA_TYPE_UINT64_ARRAY,
sizeof (info->zbc_actual) / sizeof (uint64_t),
(uint64_t *)&info->zbc_actual,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_ALGO,
DATA_TYPE_STRING,
info->zbc_checksum_name,
NULL);
if (info->zbc_byteswapped) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_CKSUM_BYTESWAP,
DATA_TYPE_BOOLEAN, 1,
NULL);
}
}
if (badabd == NULL || goodabd == NULL)
return (eip);
ASSERT3U(nui64s, <=, UINT32_MAX);
ASSERT3U(size, ==, nui64s * sizeof (uint64_t));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, <=, UINT32_MAX);
good = (const uint64_t *) abd_borrow_buf_copy((abd_t *)goodabd, size);
bad = (const uint64_t *) abd_borrow_buf_copy((abd_t *)badabd, size);
/* build up the range list by comparing the two buffers. */
for (idx = 0; idx < nui64s; idx++) {
if (good[idx] == bad[idx]) {
if (start == -1)
continue;
zei_add_range(eip, start, idx);
start = -1;
} else {
if (start != -1)
continue;
start = idx;
}
}
if (start != -1)
zei_add_range(eip, start, idx);
/* See if it will fit in our inline buffers */
inline_size = zei_range_total_size(eip);
if (inline_size > ZFM_MAX_INLINE)
no_inline = 1;
/*
* If there is no change and we want to drop if the buffers are
* identical, do so.
*/
if (inline_size == 0 && drop_if_identical) {
kmem_free(eip, sizeof (*eip));
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
return (NULL);
}
/*
* Now walk through the ranges, filling in the details of the
* differences. Also convert our uint64_t-array offsets to byte
* offsets.
*/
for (range = 0; range < eip->zei_range_count; range++) {
size_t start = eip->zei_ranges[range].zr_start;
size_t end = eip->zei_ranges[range].zr_end;
for (idx = start; idx < end; idx++) {
uint64_t set, cleared;
// bits set in bad, but not in good
set = ((~good[idx]) & bad[idx]);
// bits set in good, but not in bad
cleared = (good[idx] & (~bad[idx]));
allset |= set;
allcleared |= cleared;
if (!no_inline) {
ASSERT3U(offset, <, inline_size);
eip->zei_bits_set[offset] = set;
eip->zei_bits_cleared[offset] = cleared;
offset++;
}
update_histogram(set, eip->zei_histogram_set,
&eip->zei_range_sets[range]);
update_histogram(cleared, eip->zei_histogram_cleared,
&eip->zei_range_clears[range]);
}
/* convert to byte offsets */
eip->zei_ranges[range].zr_start *= sizeof (uint64_t);
eip->zei_ranges[range].zr_end *= sizeof (uint64_t);
}
abd_return_buf((abd_t *)goodabd, (void *)good, size);
abd_return_buf((abd_t *)badabd, (void *)bad, size);
eip->zei_allowed_mingap *= sizeof (uint64_t);
inline_size *= sizeof (uint64_t);
/* fill in ereport */
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_OFFSET_RANGES,
DATA_TYPE_UINT32_ARRAY, 2 * eip->zei_range_count,
(uint32_t *)eip->zei_ranges,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_MIN_GAP,
DATA_TYPE_UINT32, eip->zei_allowed_mingap,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_SETS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_sets,
FM_EREPORT_PAYLOAD_ZFS_BAD_RANGE_CLEARS,
DATA_TYPE_UINT32_ARRAY, eip->zei_range_count, eip->zei_range_clears,
NULL);
if (!no_inline) {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_SET_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_set,
FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_BITS,
DATA_TYPE_UINT8_ARRAY,
inline_size, (uint8_t *)eip->zei_bits_cleared,
NULL);
} else {
fm_payload_set(ereport,
FM_EREPORT_PAYLOAD_ZFS_BAD_SET_HISTOGRAM,
DATA_TYPE_UINT32_ARRAY,
NBBY * sizeof (uint64_t), eip->zei_histogram_set,
FM_EREPORT_PAYLOAD_ZFS_BAD_CLEARED_HISTOGRAM,
DATA_TYPE_UINT32_ARRAY,
NBBY * sizeof (uint64_t), eip->zei_histogram_cleared,
NULL);
}
return (eip);
}
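/*
 * A small worked example of the per-word difference computed above (added
 * purely as an illustration; the values are made up): for a word where
 * good = 0xF0 and bad = 0xF3,
 *
 *	set     = ~good & bad  = 0x03	(bits present only in the bad copy)
 *	cleared =  good & ~bad = 0x00	(bits present only in the good copy)
 *
 * so that word contributes two newly-set low-order bits to zei_bits_set (or
 * to the set histogram when the ranges do not fit inline) and nothing to
 * zei_bits_cleared.
 */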
#else
/*ARGSUSED*/
void
zfs_ereport_clear(spa_t *spa, vdev_t *vd)
{
}
#endif
/*
* Make sure our event is still valid for the given zio/vdev/pool. For example,
* we don't want to keep logging events for a faulted or missing vdev.
*/
boolean_t
zfs_ereport_is_valid(const char *subclass, spa_t *spa, vdev_t *vd, zio_t *zio)
{
#ifdef _KERNEL
/*
* If we are doing a spa_tryimport() or in recovery mode,
* ignore errors.
*/
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER)
return (B_FALSE);
/*
* If we are in the middle of opening a pool, and the previous attempt
* failed, don't bother logging any new ereports - we're just going to
* get the same diagnosis anyway.
*/
if (spa_load_state(spa) != SPA_LOAD_NONE &&
spa->spa_last_open_failed)
return (B_FALSE);
if (zio != NULL) {
/*
* If this is not a read or write zio, ignore the error. This
* can occur if the DKIOCFLUSHWRITECACHE ioctl fails.
*/
if (zio->io_type != ZIO_TYPE_READ &&
zio->io_type != ZIO_TYPE_WRITE)
return (B_FALSE);
if (vd != NULL) {
/*
* If the vdev has already been marked as failing due
* to a failed probe, then ignore any subsequent I/O
* errors, as the DE will automatically fault the vdev
* on the first such failure. This also catches cases
* where vdev_remove_wanted is set and the device has
* not yet been asynchronously placed into the REMOVED
* state.
*/
if (zio->io_vd == vd && !vdev_accessible(vd, zio))
return (B_FALSE);
/*
* Ignore checksum errors for reads from DTL regions of
* leaf vdevs.
*/
if (zio->io_type == ZIO_TYPE_READ &&
zio->io_error == ECKSUM &&
vd->vdev_ops->vdev_op_leaf &&
vdev_dtl_contains(vd, DTL_MISSING, zio->io_txg, 1))
return (B_FALSE);
}
}
/*
* For probe failure, we want to avoid posting ereports if we've
* already removed the device in the meantime.
*/
if (vd != NULL &&
strcmp(subclass, FM_EREPORT_ZFS_PROBE_FAILURE) == 0 &&
(vd->vdev_remove_wanted || vd->vdev_state == VDEV_STATE_REMOVED))
return (B_FALSE);
/* Ignore bogus delay events (like from ioctls or unqueued IOs) */
if ((strcmp(subclass, FM_EREPORT_ZFS_DELAY) == 0) &&
(zio != NULL) && (!zio->io_timestamp)) {
return (B_FALSE);
}
#endif
return (B_TRUE);
}
/*
* Post an ereport for the given subclass
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post(const char *subclass, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t state)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
if (!zfs_ereport_is_valid(subclass, spa, vd, zio))
return (EINVAL);
if (zfs_ereport_is_duplicate(subclass, spa, vd, zb, zio, 0, 0))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(subclass, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, subclass, spa, vd,
zb, zio, state, 0))
return (SET_ERROR(EINVAL)); /* couldn't post event */
if (ereport == NULL)
return (SET_ERROR(EINVAL));
/* Cleanup is handled by the callback function */
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
#endif
return (rc);
}
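/*
 * Hypothetical caller sketch (not taken from this file; the surrounding
 * I/O context is assumed): since EBUSY and EALREADY only mean the event was
 * rate limited or already posted, callers generally care about hard
 * failures alone, e.g.
 *
 *	int err = zfs_ereport_post(FM_EREPORT_ZFS_IO, spa, vd,
 *	    &zio->io_bookmark, zio, 0);
 *	if (err != 0 && err != EBUSY && err != EALREADY)
 *		cmn_err(CE_NOTE, "failed to post ereport: %d", err);
 */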
/*
* Prepare a checksum ereport
*
* Returns
* - 0 if a checksum report was created
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length, zio_bad_cksum_t *info)
{
zio_cksum_report_t *report;
#ifdef _KERNEL
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
#endif
report = kmem_zalloc(sizeof (*report), KM_SLEEP);
zio_vsd_default_cksum_report(zio, report);
/* copy the checksum failure information if it was provided */
if (info != NULL) {
report->zcr_ckinfo = kmem_zalloc(sizeof (*info), KM_SLEEP);
bcopy(info, report->zcr_ckinfo, sizeof (*info));
}
report->zcr_sector = 1ULL << vd->vdev_top->vdev_ashift;
report->zcr_align =
vdev_psize_to_asize(vd->vdev_top, report->zcr_sector);
report->zcr_length = length;
#ifdef _KERNEL
(void) zfs_ereport_start(&report->zcr_ereport, &report->zcr_detector,
FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio, offset, length);
if (report->zcr_ereport == NULL) {
zfs_ereport_free_checksum(report);
return (0);
}
#endif
mutex_enter(&spa->spa_errlist_lock);
report->zcr_next = zio->io_logical->io_cksum_report;
zio->io_logical->io_cksum_report = report;
mutex_exit(&spa->spa_errlist_lock);
return (0);
}
void
zfs_ereport_finish_checksum(zio_cksum_report_t *report, const abd_t *good_data,
const abd_t *bad_data, boolean_t drop_if_identical)
{
#ifdef _KERNEL
zfs_ecksum_info_t *info;
info = annotate_ecksum(report->zcr_ereport, report->zcr_ckinfo,
good_data, bad_data, report->zcr_length, drop_if_identical);
if (info != NULL)
zfs_zevent_post(report->zcr_ereport,
report->zcr_detector, zfs_zevent_post_cb);
else
zfs_zevent_post_cb(report->zcr_ereport, report->zcr_detector);
report->zcr_ereport = report->zcr_detector = NULL;
if (info != NULL)
kmem_free(info, sizeof (*info));
#endif
}
void
zfs_ereport_free_checksum(zio_cksum_report_t *rpt)
{
#ifdef _KERNEL
if (rpt->zcr_ereport != NULL) {
fm_nvlist_destroy(rpt->zcr_ereport,
FM_NVA_FREE);
fm_nvlist_destroy(rpt->zcr_detector,
FM_NVA_FREE);
}
#endif
rpt->zcr_free(rpt->zcr_cbdata, rpt->zcr_cbinfo);
if (rpt->zcr_ckinfo != NULL)
kmem_free(rpt->zcr_ckinfo, sizeof (*rpt->zcr_ckinfo));
kmem_free(rpt, sizeof (*rpt));
}
/*
* Post a checksum ereport
*
* Returns
* - 0 if an event was posted
* - EINVAL if there was a problem posting event
* - EBUSY if the event was rate limited
* - EALREADY if the event was already posted (duplicate)
*/
int
zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd, const zbookmark_phys_t *zb,
struct zio *zio, uint64_t offset, uint64_t length,
const abd_t *good_data, const abd_t *bad_data, zio_bad_cksum_t *zbc)
{
int rc = 0;
#ifdef _KERNEL
nvlist_t *ereport = NULL;
nvlist_t *detector = NULL;
zfs_ecksum_info_t *info;
if (!zfs_ereport_is_valid(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zio))
return (SET_ERROR(EINVAL));
if (zfs_ereport_is_duplicate(FM_EREPORT_ZFS_CHECKSUM, spa, vd, zb, zio,
offset, length))
return (SET_ERROR(EALREADY));
if (zfs_is_ratelimiting_event(FM_EREPORT_ZFS_CHECKSUM, vd))
return (SET_ERROR(EBUSY));
if (!zfs_ereport_start(&ereport, &detector, FM_EREPORT_ZFS_CHECKSUM,
spa, vd, zb, zio, offset, length) || (ereport == NULL)) {
return (SET_ERROR(EINVAL));
}
info = annotate_ecksum(ereport, zbc, good_data, bad_data, length,
B_FALSE);
if (info != NULL) {
rc = zfs_zevent_post(ereport, detector, zfs_zevent_post_cb);
kmem_free(info, sizeof (*info));
}
#endif
return (rc);
}
/*
* The 'sysevent.fs.zfs.*' events are signals posted to notify user space of
* change in the pool. All sysevents are listed in sys/sysevent/eventdefs.h
* and are designed to be consumed by the ZFS Event Daemon (ZED). For
* additional details refer to the zed(8) man page.
*/
nvlist_t *
zfs_event_create(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
nvlist_t *resource = NULL;
#ifdef _KERNEL
char class[64];
if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
return (NULL);
if ((resource = fm_nvlist_create(NULL)) == NULL)
return (NULL);
(void) snprintf(class, sizeof (class), "%s.%s.%s", type,
ZFS_ERROR_CLASS, name);
VERIFY0(nvlist_add_uint8(resource, FM_VERSION, FM_RSRC_VERSION));
VERIFY0(nvlist_add_string(resource, FM_CLASS, class));
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL, spa_name(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_GUID, spa_guid(spa)));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_STATE, spa_state(spa)));
VERIFY0(nvlist_add_int32(resource,
FM_EREPORT_PAYLOAD_ZFS_POOL_CONTEXT, spa_load_state(spa)));
if (vd) {
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_GUID, vd->vdev_guid));
VERIFY0(nvlist_add_uint64(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE, vd->vdev_state));
if (vd->vdev_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PATH, vd->vdev_path));
if (vd->vdev_devid != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_DEVID, vd->vdev_devid));
if (vd->vdev_fru != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_FRU, vd->vdev_fru));
if (vd->vdev_enc_sysfs_path != NULL)
VERIFY0(nvlist_add_string(resource,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path));
}
/* also copy any optional payload data */
if (aux) {
nvpair_t *elem = NULL;
while ((elem = nvlist_next_nvpair(aux, elem)) != NULL)
(void) nvlist_add_nvpair(resource, elem);
}
#endif
return (resource);
}
static void
zfs_post_common(spa_t *spa, vdev_t *vd, const char *type, const char *name,
nvlist_t *aux)
{
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, type, name, aux);
if (resource)
zfs_zevent_post(resource, NULL, zfs_zevent_post_cb);
#endif
}
/*
* The 'resource.fs.zfs.removed' event is an internal signal that the given vdev
* has been removed from the system. This will cause the DE to ignore any
* recent I/O errors, inferring that they are due to the asynchronous device
* removal.
*/
void
zfs_post_remove(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_REMOVED, NULL);
}
/*
* The 'resource.fs.zfs.autoreplace' event is an internal signal that the pool
* has the 'autoreplace' property set, and therefore any broken vdevs will be
* handled by higher level logic, and no vdev fault should be generated.
*/
void
zfs_post_autoreplace(spa_t *spa, vdev_t *vd)
{
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_AUTOREPLACE, NULL);
}
/*
* The 'resource.fs.zfs.statechange' event is an internal signal that the
* given vdev has transitioned its state to DEGRADED or HEALTHY. This will
* cause the retire agent to repair any outstanding fault management cases
* open because the device was not found (fault.fs.zfs.device).
*/
void
zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate)
{
#ifdef _KERNEL
nvlist_t *aux;
/*
* Add optional supplemental keys to payload
*/
aux = fm_nvlist_create(NULL);
if (vd && aux) {
if (vd->vdev_physpath) {
(void) nvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_PHYSPATH,
vd->vdev_physpath);
}
if (vd->vdev_enc_sysfs_path) {
(void) nvlist_add_string(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
}
(void) nvlist_add_uint64(aux,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE, laststate);
}
zfs_post_common(spa, vd, FM_RSRC_CLASS, FM_RESOURCE_STATECHANGE,
aux);
if (aux)
fm_nvlist_destroy(aux, FM_NVA_FREE);
#endif
}
#ifdef _KERNEL
void
zfs_ereport_init(void)
{
mutex_init(&recent_events_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&recent_events_list, sizeof (recent_events_node_t),
offsetof(recent_events_node_t, re_list_link));
avl_create(&recent_events_tree, recent_events_compare,
sizeof (recent_events_node_t), offsetof(recent_events_node_t,
re_tree_link));
}
/*
* This 'early' fini needs to run before zfs_fini() which on Linux waits
* for the system_delay_taskq to drain.
*/
void
zfs_ereport_taskq_fini(void)
{
mutex_enter(&recent_events_lock);
if (recent_events_cleaner_tqid != 0) {
taskq_cancel_id(system_delay_taskq, recent_events_cleaner_tqid);
recent_events_cleaner_tqid = 0;
}
mutex_exit(&recent_events_lock);
}
void
zfs_ereport_fini(void)
{
recent_events_node_t *entry;
while ((entry = list_head(&recent_events_list)) != NULL) {
avl_remove(&recent_events_tree, entry);
list_remove(&recent_events_list, entry);
kmem_free(entry, sizeof (*entry));
}
avl_destroy(&recent_events_tree);
list_destroy(&recent_events_list);
mutex_destroy(&recent_events_lock);
}
+void
+zfs_ereport_snapshot_post(const char *subclass, spa_t *spa, const char *name)
+{
+ nvlist_t *aux;
+
+ aux = fm_nvlist_create(NULL);
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_SNAPSHOT_NAME, name);
+
+ zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
+ fm_nvlist_destroy(aux, FM_NVA_FREE);
+}
+
+/*
+ * Post an event when a zvol is created or removed
+ *
+ * This is currently only used by macOS, since it uses the event to create
+ * symlinks between the volume name (mypool/myvol) and the actual /dev
+ * device (/dev/disk3). For example:
+ *
+ * /var/run/zfs/dsk/mypool/myvol -> /dev/disk3
+ *
+ * name: The full name of the zvol ("mypool/myvol")
+ * dev_name: The full /dev name for the zvol ("/dev/disk3")
+ * raw_name: The raw /dev name for the zvol ("/dev/rdisk3")
+ */
+void
+zfs_ereport_zvol_post(const char *subclass, const char *name,
+ const char *dev_name, const char *raw_name)
+{
+ nvlist_t *aux;
+ char *r;
+
+ boolean_t locked = mutex_owned(&spa_namespace_lock);
+ if (!locked) mutex_enter(&spa_namespace_lock);
+ spa_t *spa = spa_lookup(name);
+ if (!locked) mutex_exit(&spa_namespace_lock);
+
+ if (spa == NULL)
+ return;
+
+ aux = fm_nvlist_create(NULL);
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_DEVICE_NAME, dev_name);
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_RAW_DEVICE_NAME,
+ raw_name);
+ r = strchr(name, '/');
+ if (r && r[1])
+ nvlist_add_string(aux, FM_EREPORT_PAYLOAD_ZFS_VOLUME, &r[1]);
+
+ zfs_post_common(spa, NULL, FM_RSRC_CLASS, subclass, aux);
+ fm_nvlist_destroy(aux, FM_NVA_FREE);
+}
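+/*
+ * Illustrative call, using the example names from the comment above (the
+ * subclass constant is whichever ESC_ZFS_* value the platform code passes
+ * in; it is not defined here):
+ *
+ *	zfs_ereport_zvol_post(subclass, "mypool/myvol",
+ *	    "/dev/disk3", "/dev/rdisk3");
+ *
+ * The resulting sysevent carries the pool name, the volume portion of the
+ * dataset name ("myvol"), and both device paths, which the macOS consumer
+ * uses to maintain the /var/run/zfs/dsk symlinks.
+ */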
+
EXPORT_SYMBOL(zfs_ereport_post);
EXPORT_SYMBOL(zfs_ereport_is_valid);
EXPORT_SYMBOL(zfs_ereport_post_checksum);
EXPORT_SYMBOL(zfs_post_remove);
EXPORT_SYMBOL(zfs_post_autoreplace);
EXPORT_SYMBOL(zfs_post_state_change);
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_max, UINT, ZMOD_RW,
"Maximum recent zevents records to retain for duplicate checking");
ZFS_MODULE_PARAM(zfs_zevent, zfs_zevent_, retain_expire_secs, UINT, ZMOD_RW,
"Expiration time for recent zevents records");
#endif /* _KERNEL */
diff --git a/sys/contrib/openzfs/module/zfs/zfs_replay.c b/sys/contrib/openzfs/module/zfs/zfs_replay.c
index 9073888dbab6..e6ed3e738e40 100644
--- a/sys/contrib/openzfs/module/zfs/zfs_replay.c
+++ b/sys/contrib/openzfs/module/zfs/zfs_replay.c
@@ -1,992 +1,992 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012 Cyril Plisko. All rights reserved.
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
*/
#include <sys/types.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/cmn_err.h>
#include <sys/kmem.h>
#include <sys/thread.h>
#include <sys/file.h>
#include <sys/fcntl.h>
#include <sys/vfs.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_vnops.h>
#include <sys/spa.h>
#include <sys/zil.h>
#include <sys/byteorder.h>
#include <sys/stat.h>
#include <sys/acl.h>
#include <sys/atomic.h>
#include <sys/cred.h>
#include <sys/zpl.h>
/*
* NB: FreeBSD expects to be able to do vnode locking in lookup and
* hold the locks across all subsequent VOPs until vput is called.
* This means that its zfs vnops routines can't do any internal locking.
* In order to have the same contract as the Linux vnops there would
* need to be duplicate locked vnops. If the vnops were used more widely
* in common code this would likely be preferable. However, currently
* this is the only file where this is the case.
*/
/*
* Functions to replay ZFS intent log (ZIL) records
* The functions are called through a function vector (zfs_replay_vector)
* which is indexed by the transaction type.
*/
static void
zfs_init_vattr(vattr_t *vap, uint64_t mask, uint64_t mode,
uint64_t uid, uint64_t gid, uint64_t rdev, uint64_t nodeid)
{
bzero(vap, sizeof (*vap));
vap->va_mask = (uint_t)mask;
vap->va_mode = mode;
#if defined(__FreeBSD__) || defined(__APPLE__)
vap->va_type = IFTOVT(mode);
#endif
vap->va_uid = (uid_t)(IS_EPHEMERAL(uid)) ? -1 : uid;
vap->va_gid = (gid_t)(IS_EPHEMERAL(gid)) ? -1 : gid;
vap->va_rdev = zfs_cmpldev(rdev);
vap->va_nodeid = nodeid;
}
/* ARGSUSED */
static int
zfs_replay_error(void *arg1, void *arg2, boolean_t byteswap)
{
return (SET_ERROR(ENOTSUP));
}
static void
zfs_replay_xvattr(lr_attr_t *lrattr, xvattr_t *xvap)
{
xoptattr_t *xoap = NULL;
uint64_t *attrs;
uint64_t *crtime;
uint32_t *bitmap;
void *scanstamp;
int i;
xvap->xva_vattr.va_mask |= ATTR_XVATTR;
if ((xoap = xva_getxoptattr(xvap)) == NULL) {
xvap->xva_vattr.va_mask &= ~ATTR_XVATTR; /* shouldn't happen */
return;
}
ASSERT(lrattr->lr_attr_masksize == xvap->xva_mapsize);
bitmap = &lrattr->lr_attr_bitmap;
for (i = 0; i != lrattr->lr_attr_masksize; i++, bitmap++)
xvap->xva_reqattrmap[i] = *bitmap;
attrs = (uint64_t *)(lrattr + lrattr->lr_attr_masksize - 1);
crtime = attrs + 1;
scanstamp = (caddr_t)(crtime + 2);
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN))
xoap->xoa_hidden = ((*attrs & XAT0_HIDDEN) != 0);
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM))
xoap->xoa_system = ((*attrs & XAT0_SYSTEM) != 0);
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE))
xoap->xoa_archive = ((*attrs & XAT0_ARCHIVE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_READONLY))
xoap->xoa_readonly = ((*attrs & XAT0_READONLY) != 0);
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE))
xoap->xoa_immutable = ((*attrs & XAT0_IMMUTABLE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK))
xoap->xoa_nounlink = ((*attrs & XAT0_NOUNLINK) != 0);
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY))
xoap->xoa_appendonly = ((*attrs & XAT0_APPENDONLY) != 0);
if (XVA_ISSET_REQ(xvap, XAT_NODUMP))
xoap->xoa_nodump = ((*attrs & XAT0_NODUMP) != 0);
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE))
xoap->xoa_opaque = ((*attrs & XAT0_OPAQUE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED))
xoap->xoa_av_modified = ((*attrs & XAT0_AV_MODIFIED) != 0);
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED))
xoap->xoa_av_quarantined =
((*attrs & XAT0_AV_QUARANTINED) != 0);
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME))
ZFS_TIME_DECODE(&xoap->xoa_createtime, crtime);
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) {
ASSERT(!XVA_ISSET_REQ(xvap, XAT_PROJID));
bcopy(scanstamp, xoap->xoa_av_scanstamp, AV_SCANSTAMP_SZ);
} else if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
/*
* XAT_PROJID and XAT_AV_SCANSTAMP will never be valid
* at the same time, so we can share the same space.
*/
bcopy(scanstamp, &xoap->xoa_projid, sizeof (uint64_t));
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE))
xoap->xoa_reparse = ((*attrs & XAT0_REPARSE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE))
xoap->xoa_offline = ((*attrs & XAT0_OFFLINE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_SPARSE))
xoap->xoa_sparse = ((*attrs & XAT0_SPARSE) != 0);
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT))
xoap->xoa_projinherit = ((*attrs & XAT0_PROJINHERIT) != 0);
}
static int
zfs_replay_domain_cnt(uint64_t uid, uint64_t gid)
{
uint64_t uid_idx;
uint64_t gid_idx;
int domcnt = 0;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
if (uid_idx)
domcnt++;
if (gid_idx > 0 && gid_idx != uid_idx)
domcnt++;
return (domcnt);
}
static void *
zfs_replay_fuid_domain_common(zfs_fuid_info_t *fuid_infop, void *start,
int domcnt)
{
int i;
for (i = 0; i != domcnt; i++) {
fuid_infop->z_domain_table[i] = start;
start = (caddr_t)start + strlen(start) + 1;
}
return (start);
}
/*
* Set the uid/gid in the fuid_info structure.
*/
static void
zfs_replay_fuid_ugid(zfs_fuid_info_t *fuid_infop, uint64_t uid, uint64_t gid)
{
/*
* If owner or group are log specific FUIDs then slurp up
* domain information and build zfs_fuid_info_t
*/
if (IS_EPHEMERAL(uid))
fuid_infop->z_fuid_owner = uid;
if (IS_EPHEMERAL(gid))
fuid_infop->z_fuid_group = gid;
}
/*
* Load fuid domains into fuid_info_t
*/
static zfs_fuid_info_t *
zfs_replay_fuid_domain(void *buf, void **end, uint64_t uid, uint64_t gid)
{
int domcnt;
zfs_fuid_info_t *fuid_infop;
fuid_infop = zfs_fuid_info_alloc();
domcnt = zfs_replay_domain_cnt(uid, gid);
if (domcnt == 0)
return (fuid_infop);
fuid_infop->z_domain_table =
kmem_zalloc(domcnt * sizeof (char *), KM_SLEEP);
zfs_replay_fuid_ugid(fuid_infop, uid, gid);
fuid_infop->z_domain_cnt = domcnt;
*end = zfs_replay_fuid_domain_common(fuid_infop, buf, domcnt);
return (fuid_infop);
}
/*
* load zfs_fuid_t's and fuid_domains into fuid_info_t
*/
static zfs_fuid_info_t *
zfs_replay_fuids(void *start, void **end, int idcnt, int domcnt, uint64_t uid,
uint64_t gid)
{
uint64_t *log_fuid = (uint64_t *)start;
zfs_fuid_info_t *fuid_infop;
int i;
fuid_infop = zfs_fuid_info_alloc();
fuid_infop->z_domain_cnt = domcnt;
fuid_infop->z_domain_table =
kmem_zalloc(domcnt * sizeof (char *), KM_SLEEP);
for (i = 0; i != idcnt; i++) {
zfs_fuid_t *zfuid;
zfuid = kmem_alloc(sizeof (zfs_fuid_t), KM_SLEEP);
zfuid->z_logfuid = *log_fuid;
zfuid->z_id = -1;
zfuid->z_domidx = 0;
list_insert_tail(&fuid_infop->z_fuids, zfuid);
log_fuid++;
}
zfs_replay_fuid_ugid(fuid_infop, uid, gid);
*end = zfs_replay_fuid_domain_common(fuid_infop, log_fuid, domcnt);
return (fuid_infop);
}
static void
zfs_replay_swap_attrs(lr_attr_t *lrattr)
{
/* swap the lr_attr structure */
byteswap_uint32_array(lrattr, sizeof (*lrattr));
/* swap the bitmap */
byteswap_uint32_array(lrattr + 1, (lrattr->lr_attr_masksize - 1) *
sizeof (uint32_t));
/* swap the attributes, create time + 64 bit word for attributes */
byteswap_uint64_array((caddr_t)(lrattr + 1) + (sizeof (uint32_t) *
(lrattr->lr_attr_masksize - 1)), 3 * sizeof (uint64_t));
}
/*
* Replay file create with optional ACL, xvattr information as well
* as optional FUID information.
*/
static int
zfs_replay_create_acl(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_create_t *lracl = arg2;
char *name = NULL; /* location determined later */
lr_create_t *lr = (lr_create_t *)lracl;
znode_t *dzp;
znode_t *zp;
xvattr_t xva;
int vflg = 0;
vsecattr_t vsec = { 0 };
lr_attr_t *lrattr;
void *aclstart;
void *fuidstart;
size_t xvatlen = 0;
uint64_t txtype;
uint64_t objid;
uint64_t dnodesize;
int error;
txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
if (byteswap) {
byteswap_uint64_array(lracl, sizeof (*lracl));
if (txtype == TX_CREATE_ACL_ATTR ||
txtype == TX_MKDIR_ACL_ATTR) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
zfs_replay_swap_attrs(lrattr);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
}
aclstart = (caddr_t)(lracl + 1) + xvatlen;
zfs_ace_byteswap(aclstart, lracl->lr_acl_bytes, B_FALSE);
/* swap fuids */
if (lracl->lr_fuidcnt) {
byteswap_uint64_array((caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes),
lracl->lr_fuidcnt * sizeof (uint64_t));
}
}
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
dnodesize = LR_FOID_GET_SLOTS(lr->lr_foid) << DNODE_SHIFT;
xva_init(&xva);
zfs_init_vattr(&xva.xva_vattr, ATTR_MODE | ATTR_UID | ATTR_GID,
lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, objid);
/*
* All forms of zfs create (create, mkdir, mkxattrdir, symlink)
* eventually end up in zfs_mknode(), which assigns the object's
* creation time, generation number, and dnode size. The generic
* zfs_create() has no concept of these attributes, so we smuggle
* the values inside the vattr's otherwise unused va_ctime,
* va_nblocks, and va_fsid fields.
*/
ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT);
if (error)
goto bail;
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
switch (txtype) {
case TX_CREATE_ACL:
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
- /* FALLTHROUGH */
+ fallthrough;
case TX_CREATE_ACL_ATTR:
if (name == NULL) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
xva.xva_vattr.va_mask |= ATTR_XVATTR;
zfs_replay_xvattr(lrattr, &xva);
}
vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
}
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, &vsec);
break;
case TX_MKDIR_ACL:
aclstart = (caddr_t)(lracl + 1);
fuidstart = (caddr_t)aclstart +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay = zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
- /* FALLTHROUGH */
+ fallthrough;
case TX_MKDIR_ACL_ATTR:
if (name == NULL) {
lrattr = (lr_attr_t *)(caddr_t)(lracl + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr(lrattr, &xva);
}
vsec.vsa_mask = VSA_ACE | VSA_ACE_ACLFLAGS;
vsec.vsa_aclentp = (caddr_t)(lracl + 1) + xvatlen;
vsec.vsa_aclcnt = lracl->lr_aclcnt;
vsec.vsa_aclentsz = lracl->lr_acl_bytes;
vsec.vsa_aclflags = lracl->lr_acl_flags;
if (zfsvfs->z_fuid_replay == NULL) {
fuidstart = (caddr_t)(lracl + 1) + xvatlen +
ZIL_ACE_LENGTH(lracl->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart,
(void *)&name, lracl->lr_fuidcnt, lracl->lr_domcnt,
lr->lr_uid, lr->lr_gid);
}
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, &vsec);
break;
default:
error = SET_ERROR(ENOTSUP);
}
bail:
if (error == 0 && zp != NULL) {
#ifdef __FreeBSD__
VOP_UNLOCK1(ZTOV(zp));
#endif
zrele(zp);
}
zrele(dzp);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_create_t *lr = arg2;
char *name = NULL; /* location determined later */
char *link; /* symlink content follows name */
znode_t *dzp;
znode_t *zp = NULL;
xvattr_t xva;
int vflg = 0;
size_t lrsize = sizeof (lr_create_t);
lr_attr_t *lrattr;
void *start;
size_t xvatlen;
uint64_t txtype;
uint64_t objid;
uint64_t dnodesize;
int error;
txtype = (lr->lr_common.lrc_txtype & ~TX_CI);
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
if (txtype == TX_CREATE_ATTR || txtype == TX_MKDIR_ATTR)
zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
}
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
objid = LR_FOID_GET_OBJ(lr->lr_foid);
dnodesize = LR_FOID_GET_SLOTS(lr->lr_foid) << DNODE_SHIFT;
xva_init(&xva);
zfs_init_vattr(&xva.xva_vattr, ATTR_MODE | ATTR_UID | ATTR_GID,
lr->lr_mode, lr->lr_uid, lr->lr_gid, lr->lr_rdev, objid);
/*
* All forms of zfs create (create, mkdir, mkxattrdir, symlink)
* eventually end up in zfs_mknode(), which assigns the object's
* creation time, generation number, and dnode slot count. The
* generic zfs_create() has no concept of these attributes, so
* we smuggle the values inside the vattr's otherwise unused
* va_ctime, va_nblocks, and va_fsid fields.
*/
ZFS_TIME_DECODE(&xva.xva_vattr.va_ctime, lr->lr_crtime);
xva.xva_vattr.va_nblocks = lr->lr_gen;
xva.xva_vattr.va_fsid = dnodesize;
error = dnode_try_claim(zfsvfs->z_os, objid, dnodesize >> DNODE_SHIFT);
if (error)
goto out;
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
/*
* Symlinks don't have fuid info, and CIFS never creates
* symlinks.
*
* The _ATTR versions will grab the fuid info in their subcases.
*/
if ((int)lr->lr_common.lrc_txtype != TX_SYMLINK &&
(int)lr->lr_common.lrc_txtype != TX_MKDIR_ATTR &&
(int)lr->lr_common.lrc_txtype != TX_CREATE_ATTR) {
start = (lr + 1);
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
}
switch (txtype) {
case TX_CREATE_ATTR:
lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
- /* FALLTHROUGH */
+ fallthrough;
case TX_CREATE:
if (name == NULL)
name = (char *)start;
error = zfs_create(dzp, name, &xva.xva_vattr,
0, 0, &zp, kcred, vflg, NULL);
break;
case TX_MKDIR_ATTR:
lrattr = (lr_attr_t *)(caddr_t)(lr + 1);
xvatlen = ZIL_XVAT_SIZE(lrattr->lr_attr_masksize);
zfs_replay_xvattr((lr_attr_t *)((caddr_t)lr + lrsize), &xva);
start = (caddr_t)(lr + 1) + xvatlen;
zfsvfs->z_fuid_replay =
zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
name = (char *)start;
- /* FALLTHROUGH */
+ fallthrough;
case TX_MKDIR:
if (name == NULL)
name = (char *)(lr + 1);
error = zfs_mkdir(dzp, name, &xva.xva_vattr,
&zp, kcred, vflg, NULL);
break;
case TX_MKXATTR:
error = zfs_make_xattrdir(dzp, &xva.xva_vattr, &zp, kcred);
break;
case TX_SYMLINK:
name = (char *)(lr + 1);
link = name + strlen(name) + 1;
error = zfs_symlink(dzp, name, &xva.xva_vattr,
link, &zp, kcred, vflg);
break;
default:
error = SET_ERROR(ENOTSUP);
}
out:
if (error == 0 && zp != NULL) {
#ifdef __FreeBSD__
VOP_UNLOCK1(ZTOV(zp));
#endif
zrele(zp);
}
zrele(dzp);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
return (error);
}
static int
zfs_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_remove_t *lr = arg2;
char *name = (char *)(lr + 1); /* name follows lr_remove_t */
znode_t *dzp;
int error;
int vflg = 0;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
switch ((int)lr->lr_common.lrc_txtype) {
case TX_REMOVE:
error = zfs_remove(dzp, name, kcred, vflg);
break;
case TX_RMDIR:
error = zfs_rmdir(dzp, name, NULL, kcred, vflg);
break;
default:
error = SET_ERROR(ENOTSUP);
}
zrele(dzp);
return (error);
}
static int
zfs_replay_link(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_link_t *lr = arg2;
char *name = (char *)(lr + 1); /* name follows lr_link_t */
znode_t *dzp, *zp;
int error;
int vflg = 0;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_doid, &dzp)) != 0)
return (error);
if ((error = zfs_zget(zfsvfs, lr->lr_link_obj, &zp)) != 0) {
zrele(dzp);
return (error);
}
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
error = zfs_link(dzp, zp, name, kcred, vflg);
zrele(zp);
zrele(dzp);
return (error);
}
static int
zfs_replay_rename(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_rename_t *lr = arg2;
char *sname = (char *)(lr + 1); /* sname and tname follow lr_rename_t */
char *tname = sname + strlen(sname) + 1;
znode_t *sdzp, *tdzp;
int error;
int vflg = 0;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_sdoid, &sdzp)) != 0)
return (error);
if ((error = zfs_zget(zfsvfs, lr->lr_tdoid, &tdzp)) != 0) {
zrele(sdzp);
return (error);
}
if (lr->lr_common.lrc_txtype & TX_CI)
vflg |= FIGNORECASE;
error = zfs_rename(sdzp, sname, tdzp, tname, kcred, vflg);
zrele(tdzp);
zrele(sdzp);
return (error);
}
static int
zfs_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_write_t *lr = arg2;
char *data = (char *)(lr + 1); /* data follows lr_write_t */
znode_t *zp;
int error;
uint64_t eod, offset, length;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0) {
/*
* As we can log writes out of order, it's possible the
* file has been removed. In this case just drop the write
* and return success.
*/
if (error == ENOENT)
error = 0;
return (error);
}
offset = lr->lr_offset;
length = lr->lr_length;
eod = offset + length; /* end of data for this write */
/*
* This may be a write from a dmu_sync() for a whole block,
* and may extend beyond the current end of the file.
* We can't just replay what was written for this TX_WRITE as
* a future TX_WRITE2 may extend the eof and the data for that
* write needs to be there. So we write the whole block and
* reduce the eof. This needs to be done within the single dmu
* transaction created within vn_rdwr -> zfs_write. So a possible
* new end of file is passed through in zfsvfs->z_replay_eof
*/
zfsvfs->z_replay_eof = 0; /* 0 means don't change end of file */
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
if (zp->z_size < eod)
zfsvfs->z_replay_eof = eod;
}
error = zfs_write_simple(zp, data, length, offset, NULL);
zrele(zp);
zfsvfs->z_replay_eof = 0; /* safety */
return (error);
}
/*
* TX_WRITE2 records are only generated when dmu_sync() returns EALREADY,
* meaning the pool block is already being synced. So now that we always write
* out full blocks, all we have to do is expand the eof if
* the file has grown.
*/
static int
zfs_replay_write2(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_write_t *lr = arg2;
znode_t *zp;
int error;
uint64_t end;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
top:
end = lr->lr_offset + lr->lr_length;
if (end > zp->z_size) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
zp->z_size = end;
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
zrele(zp);
if (error == ERESTART) {
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
return (error);
}
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
/* Ensure the replayed seq is updated */
(void) zil_replaying(zfsvfs->z_log, tx);
dmu_tx_commit(tx);
}
zrele(zp);
return (error);
}
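/*
 * A concrete example of the expansion above (values are illustrative): if a
 * replayed TX_WRITE2 record has lr_offset = 128K and lr_length = 128K, then
 * end = 256K, and a file whose z_size is currently 200K is grown to exactly
 * 256K; a record whose end falls at or below the current size is a no-op.
 */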
static int
zfs_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_truncate_t *lr = arg2;
znode_t *zp;
flock64_t fl;
int error;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&fl, sizeof (fl));
fl.l_type = F_WRLCK;
fl.l_whence = SEEK_SET;
fl.l_start = lr->lr_offset;
fl.l_len = lr->lr_length;
error = zfs_space(zp, F_FREESP, &fl, O_RDWR | O_LARGEFILE,
lr->lr_offset, kcred);
zrele(zp);
return (error);
}
static int
zfs_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_setattr_t *lr = arg2;
znode_t *zp;
xvattr_t xva;
vattr_t *vap = &xva.xva_vattr;
int error;
void *start;
xva_init(&xva);
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
if ((lr->lr_mask & ATTR_XVATTR) &&
zfsvfs->z_version >= ZPL_VERSION_INITIAL)
zfs_replay_swap_attrs((lr_attr_t *)(lr + 1));
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
zfs_init_vattr(vap, lr->lr_mask, lr->lr_mode,
lr->lr_uid, lr->lr_gid, 0, lr->lr_foid);
vap->va_size = lr->lr_size;
ZFS_TIME_DECODE(&vap->va_atime, lr->lr_atime);
ZFS_TIME_DECODE(&vap->va_mtime, lr->lr_mtime);
gethrestime(&vap->va_ctime);
vap->va_mask |= ATTR_CTIME;
/*
* Fill in xvattr_t portions if necessary.
*/
start = (lr_setattr_t *)(lr + 1);
if (vap->va_mask & ATTR_XVATTR) {
zfs_replay_xvattr((lr_attr_t *)start, &xva);
start = (caddr_t)start +
ZIL_XVAT_SIZE(((lr_attr_t *)start)->lr_attr_masksize);
} else
xva.xva_vattr.va_mask &= ~ATTR_XVATTR;
zfsvfs->z_fuid_replay = zfs_replay_fuid_domain(start, &start,
lr->lr_uid, lr->lr_gid);
error = zfs_setattr(zp, vap, 0, kcred);
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
zrele(zp);
return (error);
}
static int
zfs_replay_acl_v0(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_v0_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1); /* ace array follows lr_acl_t */
vsecattr_t vsa;
znode_t *zp;
int error;
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
zfs_oldace_byteswap(ace, lr->lr_aclcnt);
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&vsa, sizeof (vsa));
vsa.vsa_mask = VSA_ACE | VSA_ACECNT;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentsz = sizeof (ace_t) * vsa.vsa_aclcnt;
vsa.vsa_aclflags = 0;
vsa.vsa_aclentp = ace;
error = zfs_setsecattr(zp, &vsa, 0, kcred);
zrele(zp);
return (error);
}
/*
* Replaying ACLs is complicated by FUID support.
* The log record may contain some optional data
* to be used for replaying FUIDs. These pieces
* are the actual FUIDs that were created initially.
* The FUID table index may no longer be valid and
* during zfs_create() a new index may be assigned.
* Because of this the log will contain the original
* domain+rid in order to create a new FUID.
*
* The individual ACEs may contain an ephemeral uid/gid which is no
* longer valid and will need to be replaced with an actual FUID.
*
*/
static int
zfs_replay_acl(void *arg1, void *arg2, boolean_t byteswap)
{
zfsvfs_t *zfsvfs = arg1;
lr_acl_t *lr = arg2;
ace_t *ace = (ace_t *)(lr + 1);
vsecattr_t vsa;
znode_t *zp;
int error;
if (byteswap) {
byteswap_uint64_array(lr, sizeof (*lr));
zfs_ace_byteswap(ace, lr->lr_acl_bytes, B_FALSE);
if (lr->lr_fuidcnt) {
byteswap_uint64_array((caddr_t)ace +
ZIL_ACE_LENGTH(lr->lr_acl_bytes),
lr->lr_fuidcnt * sizeof (uint64_t));
}
}
if ((error = zfs_zget(zfsvfs, lr->lr_foid, &zp)) != 0)
return (error);
bzero(&vsa, sizeof (vsa));
vsa.vsa_mask = VSA_ACE | VSA_ACECNT | VSA_ACE_ACLFLAGS;
vsa.vsa_aclcnt = lr->lr_aclcnt;
vsa.vsa_aclentp = ace;
vsa.vsa_aclentsz = lr->lr_acl_bytes;
vsa.vsa_aclflags = lr->lr_acl_flags;
if (lr->lr_fuidcnt) {
void *fuidstart = (caddr_t)ace +
ZIL_ACE_LENGTH(lr->lr_acl_bytes);
zfsvfs->z_fuid_replay =
zfs_replay_fuids(fuidstart, &fuidstart,
lr->lr_fuidcnt, lr->lr_domcnt, 0, 0);
}
error = zfs_setsecattr(zp, &vsa, 0, kcred);
if (zfsvfs->z_fuid_replay)
zfs_fuid_info_free(zfsvfs->z_fuid_replay);
zfsvfs->z_fuid_replay = NULL;
zrele(zp);
return (error);
}
/*
* Callback vectors for replaying records
*/
zil_replay_func_t *zfs_replay_vector[TX_MAX_TYPE] = {
zfs_replay_error, /* no such type */
zfs_replay_create, /* TX_CREATE */
zfs_replay_create, /* TX_MKDIR */
zfs_replay_create, /* TX_MKXATTR */
zfs_replay_create, /* TX_SYMLINK */
zfs_replay_remove, /* TX_REMOVE */
zfs_replay_remove, /* TX_RMDIR */
zfs_replay_link, /* TX_LINK */
zfs_replay_rename, /* TX_RENAME */
zfs_replay_write, /* TX_WRITE */
zfs_replay_truncate, /* TX_TRUNCATE */
zfs_replay_setattr, /* TX_SETATTR */
zfs_replay_acl_v0, /* TX_ACL_V0 */
zfs_replay_acl, /* TX_ACL */
zfs_replay_create_acl, /* TX_CREATE_ACL */
zfs_replay_create, /* TX_CREATE_ATTR */
zfs_replay_create_acl, /* TX_CREATE_ACL_ATTR */
zfs_replay_create_acl, /* TX_MKDIR_ACL */
zfs_replay_create, /* TX_MKDIR_ATTR */
zfs_replay_create_acl, /* TX_MKDIR_ACL_ATTR */
zfs_replay_write2, /* TX_WRITE2 */
};
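/*
 * Conceptually, replay dispatch through this table looks like the sketch
 * below (simplified; the real driver lives in zil.c and adds byte swapping,
 * error handling, and sequence tracking):
 *
 *	uint64_t txtype = lr->lrc_txtype & ~TX_CI;
 *	if (txtype == 0 || txtype >= TX_MAX_TYPE)
 *		error = SET_ERROR(EINVAL);
 *	else
 *		error = zfs_replay_vector[txtype](zfsvfs, lr, byteswap);
 *
 * The TX_CI bit is stripped before indexing; the individual handlers check
 * it again to decide whether to pass FIGNORECASE.
 */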
diff --git a/sys/contrib/openzfs/module/zfs/zil.c b/sys/contrib/openzfs/module/zfs/zil.c
index 2eeb4fa4fe42..640e805d093a 100644
--- a/sys/contrib/openzfs/module/zfs/zil.c
+++ b/sys/contrib/openzfs/module/zfs/zil.c
@@ -1,3705 +1,3733 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2018 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2018 Datto Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
/*
* The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
* calls that change the file system. Each itx has enough information to
* be able to replay them after a system crash, power loss, or
* equivalent failure mode. These are stored in memory until either:
*
* 1. they are committed to the pool by the DMU transaction group
* (txg), at which point they can be discarded; or
* 2. they are committed to the on-disk ZIL for the dataset being
* modified (e.g. due to an fsync, O_DSYNC, or other synchronous
* requirement).
*
* In the event of a crash or power loss, the itxs contained by each
* dataset's on-disk ZIL will be replayed when that dataset is first
* instantiated (e.g. if the dataset is a normal filesystem, when it is
* first mounted).
*
* As hinted at above, there is one ZIL per dataset (both the in-memory
* representation, and the on-disk representation). The on-disk format
* consists of 3 parts:
*
* - a single, per-dataset, ZIL header; which points to a chain of
* - zero or more ZIL blocks; each of which contains
* - zero or more ZIL records
*
* A ZIL record holds the information necessary to replay a single
* system call transaction. A ZIL block can hold many ZIL records, and
* the blocks are chained together, similarly to a singly linked list.
*
* Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
* block in the chain, and the ZIL header points to the first block in
* the chain.
*
* Note, there is not a fixed place in the pool to hold these ZIL
* blocks; they are dynamically allocated and freed as needed from the
* blocks available on the pool, though they can be preferentially
* allocated from a dedicated "log" vdev.
*/
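/*
 * A minimal picture of the layout described above (illustrative only):
 *
 *	zil_header_t              ZIL block                ZIL block
 *	+------------+    +---------------------+    +--------------------+
 *	| zh_log  ---+--->| rec | rec | ... | * -+--->| rec | ... | *(hole)|
 *	+------------+    +---------------------+    +--------------------+
 *
 * where "*" is the embedded blkptr_t to the next block; a hole (or a
 * checksum mismatch) terminates the chain. zil_parse() below walks this
 * chain at claim and replay time.
 */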
/*
* This controls the amount of time that a ZIL block (lwb) will remain
* "open" when it isn't "full", and it has a thread waiting for it to be
* committed to stable storage. Please refer to the zil_commit_waiter()
* function (and the comments within it) for more details.
*/
int zfs_commit_timeout_pct = 5;
/*
* See zil.h for more information about these fields.
*/
zil_stats_t zil_stats = {
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
};
static kstat_t *zil_ksp;
/*
* Disable intent logging replay. This global ZIL switch affects all pools.
*/
int zil_replay_disable = 0;
/*
* Disable the DKIOCFLUSHWRITECACHE commands that are normally sent to
* the disk(s) by the ZIL after an LWB write has completed. Setting this
* will cause ZIL corruption on power loss if a volatile out-of-order
* write cache is enabled.
*/
int zil_nocacheflush = 0;
/*
* Limit SLOG write size per commit executed with synchronous priority.
* Any writes above that will be executed with lower (asynchronous) priority
* to limit potential SLOG device abuse by a single active ZIL writer.
*/
unsigned long zil_slog_bulk = 768 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
static int
zil_bp_compare(const void *x1, const void *x2)
{
const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
int cmp = TREE_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (likely(cmp))
return (cmp);
return (TREE_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
avl_create(&zilog->zl_bp_tree, zil_bp_compare,
sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
avl_tree_t *t = &zilog->zl_bp_tree;
zil_bp_node_t *zn;
void *cookie = NULL;
while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zn, sizeof (zil_bp_node_t));
avl_destroy(t);
}
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
avl_tree_t *t = &zilog->zl_bp_tree;
const dva_t *dva;
zil_bp_node_t *zn;
avl_index_t where;
if (BP_IS_EMBEDDED(bp))
return (0);
dva = BP_IDENTITY(bp);
if (avl_find(t, dva, &where) != NULL)
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
zio_cksum_t *zc = &bp->blk_cksum;
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_0],
sizeof (zc->zc_word[ZIL_ZC_GUID_0]));
(void) random_get_pseudo_bytes((void *)&zc->zc_word[ZIL_ZC_GUID_1],
sizeof (zc->zc_word[ZIL_ZC_GUID_1]));
zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
/*
* Read a log block and make sure it's valid.
*/
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
blkptr_t *nbp, void *dst, char **end)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
zio_flags |= ZIO_FLAG_SPECULATIVE;
if (!decrypt)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
&abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
zio_cksum_t cksum = bp->blk_cksum;
/*
* Validate the checksummed log block.
*
* Sequence numbers should be... sequential. The checksum
* verifier for the next block should be bp's checksum plus 1.
*
* Also check the log chain linkage and size used.
*/
cksum.zc_word[ZIL_ZC_SEQ]++;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t *zilc = abuf->b_data;
char *lr = (char *)(zilc + 1);
uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, len);
*end = (char *)dst + len;
*nbp = zilc->zc_next_blk;
}
} else {
char *lr = abuf->b_data;
uint64_t size = BP_GET_LSIZE(bp);
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(zilc->zc_nused, <=,
SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
}
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
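/*
 * Concretely (an illustrative trace of the check above): if block N was
 * written with blk_cksum word ZIL_ZC_SEQ == 7, then the zil_chain_t stored
 * in block N records the expected checksum of block N+1 with
 * ZIL_ZC_SEQ == 8. zil_read_log_block() recomputes that expectation from
 * the block pointer it was handed (7 + 1) and, if the stored value differs
 * or the next-block pointer is a hole, treats the log as ending at block N.
 */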
/*
* Read a TX_WRITE log data block.
*/
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
/*
* If we are not using the resulting data, we are just checking that
* it hasn't been corrupted so we don't need to waste CPU time
* decompressing and decrypting it.
*/
if (wbuf == NULL)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (wbuf != NULL)
bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
/*
* Parse the intent log, and call parse_func for each valid record within.
*/
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt)
{
const zil_header_t *zh = zilog->zl_header;
boolean_t claimed = !!zh->zh_claim_txg;
uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
uint64_t max_blk_seq = 0;
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
blkptr_t blk, next_blk;
char *lrbuf, *lrp;
int error = 0;
bzero(&next_blk, sizeof (blkptr_t));
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
claim_lr_seq = UINT64_MAX;
/*
* Starting at the block pointed to by zh_log we read the log chain.
* For each block in the chain we strongly check that block to
* ensure its validity. We stop when an invalid block is found.
* For each block pointer in the chain we call parse_blk_func().
* For each record in each valid block we call parse_lr_func().
* If the log has been claimed, stop if we encounter a sequence
* number greater than the highest claimed sequence number.
*/
lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
zil_bp_tree_init(zilog);
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
char *end = NULL;
if (blk_seq > claim_blk_seq)
break;
error = parse_blk_func(zilog, &blk, arg, txg);
if (error != 0)
break;
ASSERT3U(max_blk_seq, <, blk_seq);
max_blk_seq = blk_seq;
blk_count++;
if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
break;
error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
lrbuf, &end);
if (error != 0)
break;
for (lrp = lrbuf; lrp < end; lrp += reclen) {
lr_t *lr = (lr_t *)lrp;
reclen = lr->lrc_reclen;
ASSERT3U(reclen, >=, sizeof (lr_t));
if (lr->lrc_seq > claim_lr_seq)
goto done;
error = parse_lr_func(zilog, lr, arg, txg);
if (error != 0)
goto done;
ASSERT3U(max_lr_seq, <, lr->lrc_seq);
max_lr_seq = lr->lrc_seq;
lr_count++;
}
}
done:
zilog->zl_parse_error = error;
zilog->zl_parse_blk_seq = max_blk_seq;
zilog->zl_parse_lr_seq = max_lr_seq;
zilog->zl_parse_blk_count = blk_count;
zilog->zl_parse_lr_count = lr_count;
ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
(max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq) ||
(decrypt && error == EIO));
zil_bp_tree_fini(zilog);
zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
return (error);
}
/* ARGSUSED */
static int
zil_clear_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
ASSERT(!BP_IS_HOLE(bp));
/*
* As we call this function from the context of a rewind to a
* checkpoint, each ZIL block whose txg is later than the txg
* that we rewind to is invalid. Thus, we return -1 so
* zil_parse() doesn't attempt to read it.
*/
if (bp->blk_birth >= first_txg)
return (-1);
if (zil_bp_tree_add(zilog, bp) != 0)
return (0);
zio_free(zilog->zl_spa, first_txg, bp);
return (0);
}
/* ARGSUSED */
static int
zil_noop_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
return (0);
}
static int
zil_claim_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t first_txg)
{
/*
* Claim log block if not already committed and not already claimed.
* If tx == NULL, just verify that the block is claimable.
*/
if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
zil_bp_tree_add(zilog, bp) != 0)
return (0);
return (zio_wait(zio_claim(NULL, zilog->zl_spa,
tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t first_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
int error;
if (lrc->lrc_txtype != TX_WRITE)
return (0);
/*
* If the block is not readable, don't claim it. This can happen
* in normal operation when a log block is written to disk before
* some of the dmu_sync() blocks it points to. In this case, the
* transaction cannot have been committed to anyone (we would have
* waited for all writes to be stable first), so it is semantically
* correct to declare this the end of the log.
*/
if (lr->lr_blkptr.blk_birth >= first_txg) {
error = zil_read_log_data(zilog, lr, NULL);
if (error != 0)
return (error);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, const blkptr_t *bp, void *tx,
uint64_t claim_txg)
{
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_free_log_record(zilog_t *zilog, const lr_t *lrc, void *tx,
uint64_t claim_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
/*
* If we previously claimed it, we need to free it.
*/
if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
!BP_IS_HOLE(bp))
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
return (TREE_CMP(v1, v2));
}
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
boolean_t fastwrite)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
lwb->lwb_zilog = zilog;
lwb->lwb_blk = *bp;
lwb->lwb_fastwrite = fastwrite;
lwb->lwb_slog = slog;
lwb->lwb_state = LWB_STATE_CLOSED;
lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
lwb->lwb_max_txg = txg;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
lwb->lwb_tx = NULL;
lwb->lwb_issued_timestamp = 0;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
lwb->lwb_nused = sizeof (zil_chain_t);
lwb->lwb_sz = BP_GET_LSIZE(bp);
} else {
lwb->lwb_nused = 0;
lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
}
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
return (lwb);
}
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
ASSERT3P(lwb->lwb_write_zio, ==, NULL);
ASSERT3P(lwb->lwb_root_zio, ==, NULL);
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
/*
* Clear the zilog's field to indicate this lwb is no longer
* valid, and prevent use-after-free errors.
*/
if (zilog->zl_last_lwb_opened == lwb)
zilog->zl_last_lwb_opened = NULL;
kmem_cache_free(zil_lwb_cache, lwb);
}
/*
* Called when we create in-memory log transactions so that we know
* to cleanup the itxs at the end of spa_sync().
*/
static void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(spa_writeable(zilog->zl_spa));
if (ds->ds_is_snapshot)
panic("dirtying snapshot!");
if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, zilog);
zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
}
}
/*
* Determine if the zil is dirty in the specified txg. Callers wanting to
* ensure that the dirty state does not change must hold the itxg_lock for
* the specified txg. Holding the lock will ensure that the zil cannot be
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
static boolean_t __maybe_unused
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
return (B_TRUE);
return (B_FALSE);
}
/*
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
static boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Create an on-disk intent log.
*/
static lwb_t *
zil_create(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb = NULL;
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
boolean_t fastwrite = FALSE;
boolean_t slog = FALSE;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
ASSERT(zh->zh_claim_txg == 0);
ASSERT(zh->zh_replay_seq == 0);
blk = zh->zh_log;
/*
* Allocate an initial log block if:
* - there isn't one already
* - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
if (!BP_IS_HOLE(&blk)) {
zio_free(zilog->zl_spa, txg, &blk);
BP_ZERO(&blk);
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
ZIL_MIN_BLKSZ, &slog);
fastwrite = TRUE;
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
/*
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);
/*
* If we just allocated the first log block, commit our transaction
* and wait for zil_sync() to stuff the block pointer into zh_log.
* (zh is part of the MOS, so we cannot modify it in open context.)
*/
if (tx != NULL) {
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
}
ASSERT(error != 0 || bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
IMPLY(error == 0, lwb != NULL);
return (lwb);
}
/*
* In one tx, free all log blocks and clear the log header. If keep_first
* is set, then we're replaying a log with no content. We want to keep the
* first block, however, so that the first synchronous transaction doesn't
* require a txg_wait_synced() in zil_create(). We don't need to
* txg_wait_synced() here either when keep_first is set, because both
* zil_create() and zil_destroy() will wait for any in-progress destroys
* to complete.
*/
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb;
dmu_tx_t *tx;
uint64_t txg;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_old_header = *zh; /* debugging aid */
if (BP_IS_HOLE(&zh->zh_log))
return;
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lock);
ASSERT3U(zilog->zl_destroy_txg, <, txg);
zilog->zl_destroy_txg = txg;
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
VERIFY(!keep_first);
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa,
&lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
}
} else if (!keep_first) {
zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
}
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
ASSERT(list_is_empty(&zilog->zl_lwb_list));
(void) zil_parse(zilog, zil_free_log_block,
zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
dmu_tx_t *tx = txarg;
zilog_t *zilog;
uint64_t first_txg;
zil_header_t *zh;
objset_t *os;
int error;
error = dmu_objset_own_obj(dp, ds->ds_object,
DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
if (error != 0) {
/*
* EBUSY indicates that the objset is inconsistent, in which
* case it can not have a ZIL.
*/
if (error != EBUSY) {
cmn_err(CE_WARN, "can't open objset for %llu, error %u",
(unsigned long long)ds->ds_object, error);
}
return (0);
}
zilog = dmu_objset_zil(os);
zh = zil_header_in_syncing_context(zilog);
ASSERT3U(tx->tx_txg, ==, spa_first_txg(zilog->zl_spa));
first_txg = spa_min_claim_txg(zilog->zl_spa);
/*
* If the spa_log_state is not set to be cleared, check whether
* the current uberblock is a checkpoint one and if the current
* header has been claimed before moving on.
*
* If the current uberblock is a checkpointed uberblock then
* one of the following scenarios took place:
*
* 1] We are currently rewinding to the checkpoint of the pool.
* 2] We crashed in the middle of a checkpoint rewind but we
* did manage to write the checkpointed uberblock to the
* vdev labels, so when we tried to import the pool again
* the checkpointed uberblock was selected from the import
* procedure.
*
* In both cases we want to zero out all the ZIL blocks, except
* the ones that have been claimed at the time of the checkpoint
* (their zh_claim_txg != 0). The reason is that these blocks
* may be corrupted since we may have reused their locations on
* disk after we took the checkpoint.
*
* We could try to set spa_log_state to SPA_LOG_CLEAR earlier
* when we first figure out whether the current uberblock is
* checkpointed or not. Unfortunately, that would discard all
* the logs, including the ones that are claimed, and we would
* leak space.
*/
if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR ||
(zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)) {
if (!BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_clear_log_block,
zil_noop_log_record, tx, first_txg, B_FALSE);
}
BP_ZERO(&zh->zh_log);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* If we are not rewinding and opening the pool normally, then
* the min_claim_txg should be equal to the first txg of the pool.
*/
ASSERT3U(first_txg, ==, spa_first_txg(zilog->zl_spa));
/*
* Claim all log blocks if we haven't already done so, and remember
* the highest claimed sequence number. This ensures that if we can
* read only part of the log now (e.g. due to a missing device),
* but we can read the entire log later, we will not try to replay
* or destroy beyond the last block we successfully claimed.
*/
ASSERT3U(zh->zh_claim_txg, <=, first_txg);
if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_claim_log_block,
zil_claim_log_record, tx, first_txg, B_FALSE);
zh->zh_claim_txg = first_txg;
zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
zh->zh_flags |= ZIL_REPLAY_NEEDED;
zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Check the log by walking the log chain.
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
/* ARGSUSED */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
int error;
ASSERT(tx == NULL);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
cmn_err(CE_WARN, "can't open objset %llu, error %d",
(unsigned long long)ds->ds_object, error);
return (0);
}
zilog = dmu_objset_zil(os);
bp = (blkptr_t *)&zilog->zl_header->zh_log;
if (!BP_IS_HOLE(bp)) {
vdev_t *vd;
boolean_t valid = B_TRUE;
/*
* Check the first block and determine if it's on a log device
* which may have been removed or faulted prior to loading this
* pool. If so, there's no point in checking the rest of the
* log as its content should have already been synced to the
* pool.
*/
spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
if (vd->vdev_islog && vdev_is_dead(vd))
valid = vdev_log_state_valid(vd);
spa_config_exit(os->os_spa, SCL_STATE, FTAG);
if (!valid)
return (0);
/*
* Check whether the current uberblock is checkpointed (e.g.
* we are rewinding) and whether the current header has been
* claimed or not. If it hasn't then skip verifying it. We
* do this because its ZIL blocks may be part of the pool's
* state before the rewind, which is no longer valid.
*/
zil_header_t *zh = zil_header_in_syncing_context(zilog);
if (zilog->zl_spa->spa_uberblock.ub_checkpoint_txg != 0 &&
zh->zh_claim_txg == 0)
return (0);
}
/*
* Because tx == NULL, zil_claim_log_block() will not actually claim
* any blocks, but just determine whether it is possible to do so.
* In addition to checking the log chain, zil_claim_log_block()
* will invoke zio_claim() with a done func of spa_claim_notify(),
* which will update spa_max_claim_txg. See spa_load() for details.
*/
error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
zilog->zl_header->zh_claim_txg ? -1ULL :
spa_min_claim_txg(os->os_spa), B_FALSE);
return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
* When an itx is "skipped", this function is used to properly mark the
* waiter as "done, and signal any thread(s) waiting on it. An itx can
* be skipped (and not committed to an lwb) for a variety of reasons,
* one of them being that the itx was committed via spa_sync(), prior to
* it being committed to an lwb; this can happen if a thread calling
* zil_commit() is racing with spa_sync().
*/
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when the given waiter is to be linked into an
* lwb's "lwb_waiter" list; i.e. when the itx is committed to the lwb.
* At this point, the waiter will no longer be referenced by the itx,
* and instead, will be referenced by the lwb.
*/
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
/*
* The lwb_waiters field of the lwb is protected by the zilog's
* zl_lock, thus it must be held when calling this function.
*/
ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3P(lwb, !=, NULL);
ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE);
list_insert_tail(&lwb->lwb_waiters, zcw);
zcw->zcw_lwb = lwb;
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when zio_alloc_zil() fails to allocate a ZIL
* block, and the given waiter must be linked to the "nolwb waiters"
* list inside of zil_process_commit_list().
*/
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
list_insert_tail(nolwb, zcw);
mutex_exit(&zcw->zcw_lock);
}
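/*
* Record the vdevs referenced by this block pointer in the lwb's vdev
* tree, so that zil_lwb_write_done() knows which vdevs need a cache
* flush. This is a no-op when cache flushing is disabled via
* zil_nocacheflush.
*/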
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
avl_tree_t *t = &lwb->lwb_vdev_tree;
avl_index_t where;
zil_vdev_node_t *zv, zvsearch;
int ndvas = BP_GET_NDVAS(bp);
int i;
if (zil_nocacheflush)
return;
mutex_enter(&lwb->lwb_vdev_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
}
mutex_exit(&lwb->lwb_vdev_lock);
}
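/*
* Transfer deferred cache-flush work from a completed lwb to the next
* lwb in the chain by merging lwb's vdev tree into nlwb's vdev tree.
*/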
static void
zil_lwb_flush_defer(lwb_t *lwb, lwb_t *nlwb)
{
avl_tree_t *src = &lwb->lwb_vdev_tree;
avl_tree_t *dst = &nlwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(nlwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* At this point in its lifetime, 'lwb' no longer needs the protection
* of lwb_vdev_lock for its lwb_vdev_tree (it will only be modified
* while holding zilog->zl_lock), since its writes and those of its
* children have all completed. The younger 'nlwb' may still be waiting
* on future writes to additional vdevs.
*/
mutex_enter(&nlwb->lwb_vdev_lock);
/*
* Tear down the 'lwb' vdev tree, ensuring that entries which do not
* exist in 'nlwb' are moved to it, freeing any would-be duplicates.
*/
while ((zv = avl_destroy_nodes(src, &cookie)) != NULL) {
avl_index_t where;
if (avl_find(dst, zv, &where) == NULL) {
avl_insert(dst, zv, where);
} else {
kmem_free(zv, sizeof (*zv));
}
}
mutex_exit(&nlwb->lwb_vdev_lock);
}
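/*
* Track the highest txg of any itx committed to this lwb.
*/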
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
* This function is called after all vdevs associated with a given lwb
* write have completed their DKIOCFLUSHWRITECACHE command; or as soon
* as the lwb write completes, if "zil_nocacheflush" is set. Further,
* all "previous" lwb's will have completed before this function is
* called; i.e. this function is called for all previous lwbs before
* it's called for "this" lwb (enforced via zio the dependencies
* configured in zil_lwb_set_zio_dependency()).
*
* The intention is for this function to be called as soon as the
* contents of an lwb are considered "stable" on disk, and will survive
* any sudden loss of power. At this point, any threads waiting for the
* lwb to reach this state are signalled, and the "waiter" structures
* are marked "done".
*/
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
zilog_t *zilog = lwb->lwb_zilog;
dmu_tx_t *tx = lwb->lwb_tx;
zil_commit_waiter_t *zcw;
itx_t *itx;
spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
mutex_enter(&zilog->zl_lock);
/*
* Ensure the lwb buffer pointer is cleared before releasing the
* txg. If we have had an allocation failure and the txg is
* waiting to sync then we want zil_sync() to remove the lwb so
* that it's not picked up as the next new one in
* zil_process_commit_list(). zil_sync() will only remove the
* lwb if lwb_buf is null.
*/
lwb->lwb_buf = NULL;
lwb->lwb_tx = NULL;
ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;
lwb->lwb_root_zio = NULL;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_WRITE_DONE);
lwb->lwb_state = LWB_STATE_FLUSH_DONE;
if (zilog->zl_last_lwb_opened == lwb) {
/*
* Remember the highest committed log sequence number
* for ztest. We only update this value when all the log
* writes succeeded, because ztest wants to ASSERT that
* it got the whole log chain.
*/
zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
while ((itx = list_head(&lwb->lwb_itxs)) != NULL) {
list_remove(&lwb->lwb_itxs, itx);
zil_itx_destroy(itx);
}
while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
ASSERT(list_link_active(&zcw->zcw_node));
list_remove(&lwb->lwb_waiters, zcw);
ASSERT3P(zcw->zcw_lwb, ==, lwb);
zcw->zcw_lwb = NULL;
+ /*
+ * We expect any ZIO errors from child ZIOs to have been
+ * propagated "up" to this specific LWB's root ZIO, in
+ * order for this error handling to work correctly. This
+ * includes ZIO errors from either this LWB's write or
+ * flush, as well as any errors from other dependent LWBs
+ * (e.g. a root LWB ZIO that might be a child of this LWB).
+ *
+ * With that said, it's important to note that LWB flush
+ * errors are not propagated up to the LWB root ZIO.
+ * This is incorrect behavior, and results in VDEV flush
+ * errors not being handled correctly here. See the
+ * comment above the call to "zio_flush" for details.
+ */
zcw->zcw_zio_error = zio->io_error;
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
mutex_exit(&zilog->zl_lock);
/*
* Now that we've written this log block, we have a stable pointer
* to the next block in the chain, so it's OK to let the txg in
* which we allocated the next block sync.
*/
dmu_tx_commit(tx);
}
/*
* This is called when an lwb's write zio completes. The callback's
* purpose is to issue the DKIOCFLUSHWRITECACHE commands for the vdevs
* in the lwb's lwb_vdev_tree. The tree will contain the vdevs involved
* in writing out this specific lwb's data, and in the case that cache
* flushes have been deferred, vdevs involved in writing the data for
* previous lwbs. The writes corresponding to all the vdevs in the
* lwb_vdev_tree will have completed by the time this is called, due to
* the zio dependencies configured in zil_lwb_set_zio_dependency(),
* which takes deferred flushes into account. The lwb will be "done"
* once zil_lwb_flush_vdevs_done() is called, which occurs in the zio
* completion callback for the lwb's root zio.
*/
static void
zil_lwb_write_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
spa_t *spa = zio->io_spa;
zilog_t *zilog = lwb->lwb_zilog;
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
lwb_t *nlwb;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
ASSERT(!BP_IS_GANG(zio->io_bp));
ASSERT(!BP_IS_HOLE(zio->io_bp));
ASSERT(BP_GET_FILL(zio->io_bp) == 0);
abd_free(zio->io_abd);
mutex_enter(&zilog->zl_lock);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
lwb->lwb_state = LWB_STATE_WRITE_DONE;
lwb->lwb_write_zio = NULL;
lwb->lwb_fastwrite = FALSE;
nlwb = list_next(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
return;
/*
* If there was an IO error, we're not going to call zio_flush()
* on these vdevs, so we simply empty the tree and free the
* nodes. We avoid calling zio_flush() since there isn't any
* good reason for doing so, after the lwb block failed to be
* written out.
+ *
+ * Additionally, we don't perform any further error handling at
+ * this point (e.g. setting "zcw_zio_error" appropriately), as
+ * we expect that to occur in "zil_lwb_flush_vdevs_done" (thus,
+ * we expect any error seen here to have been propagated to
+ * that function).
*/
if (zio->io_error != 0) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
}
/*
* If this lwb does not have any threads waiting for it to
* complete, we want to defer issuing the DKIOCFLUSHWRITECACHE
* command to the vdevs written to by "this" lwb, and instead
* rely on the "next" lwb to handle the DKIOCFLUSHWRITECACHE
* command for those vdevs. Thus, we merge the vdev tree of
* "this" lwb with the vdev tree of the "next" lwb in the list,
* and assume the "next" lwb will handle flushing the vdevs (or
* deferring the flush(es) again).
*
* This is a useful performance optimization, especially for
* workloads with lots of async write activity and few sync
* write and/or fsync activity, as it has the potential to
* coalesce multiple flush commands to a vdev into one.
*/
if (list_head(&lwb->lwb_waiters) == NULL && nlwb != NULL) {
zil_lwb_flush_defer(lwb, nlwb);
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
return;
}
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
- if (vd != NULL)
+ if (vd != NULL) {
+ /*
+ * The "ZIO_FLAG_DONT_PROPAGATE" is currently
+ * always used within "zio_flush". This means,
+ * any errors when flushing the vdev(s), will
+ * (unfortunately) not be handled correctly,
+ * since these "zio_flush" errors will not be
+ * propagated up to "zil_lwb_flush_vdevs_done".
+ */
zio_flush(lwb->lwb_root_zio, vd);
+ }
kmem_free(zv, sizeof (*zv));
}
}
static void
zil_lwb_set_zio_dependency(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zilog->zl_lock));
/*
* The zilog's "zl_last_lwb_opened" field is used to build the
* lwb/zio dependency chain, which is used to preserve the
* ordering of lwb completions that is required by the semantics
* of the ZIL. Each new lwb zio becomes a parent of the
* "previous" lwb zio, such that the new lwb's zio cannot
* complete until the "previous" lwb's zio completes.
*
* This is required by the semantics of zil_commit(); the commit
* waiters attached to the lwbs will be woken in the lwb zio's
* completion callback, so this zio dependency graph ensures the
* waiters are woken in the correct order (the same order the
* lwbs were created).
*/
if (last_lwb_opened != NULL &&
last_lwb_opened->lwb_state != LWB_STATE_FLUSH_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED ||
last_lwb_opened->lwb_state == LWB_STATE_WRITE_DONE);
ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
zio_add_child(lwb->lwb_root_zio,
last_lwb_opened->lwb_root_zio);
/*
* If the previous lwb's write hasn't already completed,
* we also want to order the completion of the lwb write
* zios (above, we only order the completion of the lwb
* root zios). This is required because of how we can
* defer the DKIOCFLUSHWRITECACHE commands for each lwb.
*
* When the DKIOCFLUSHWRITECACHE commands are deferred,
* the previous lwb will rely on this lwb to flush the
* vdevs written to by that previous lwb. Thus, we need
* to ensure this lwb doesn't issue the flush until
* after the previous lwb's write completes. We ensure
* this ordering by setting the zio parent/child
* relationship here.
*
* Without this relationship on the lwb's write zio,
* it's possible for this lwb's write to complete prior
* to the previous lwb's write completing; and thus, the
* vdevs for the previous lwb would be flushed prior to
* that lwb's data being written to those vdevs (the
* vdevs are flushed in the lwb write zio's completion
* handler, zil_lwb_write_done()).
*/
if (last_lwb_opened->lwb_state != LWB_STATE_WRITE_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
ASSERT3P(last_lwb_opened->lwb_write_zio, !=, NULL);
zio_add_child(lwb->lwb_write_zio,
last_lwb_opened->lwb_write_zio);
}
}
}
/*
* This function's purpose is to "open" an lwb such that it is ready to
* accept new itxs being committed to it. To do this, the lwb's zio
* structures are created, and linked to the lwb. This function is
* idempotent; if the passed in lwb has already been opened, this
* function is essentially a no-op.
*/
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
zbookmark_phys_t zb;
zio_priority_t prio;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
/* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
mutex_enter(&zilog->zl_lock);
if (lwb->lwb_root_zio == NULL) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
BP_GET_LSIZE(&lwb->lwb_blk));
if (!lwb->lwb_fastwrite) {
metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 1;
}
if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
lwb->lwb_root_zio = zio_root(zilog->zl_spa,
zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
- prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
- ZIO_FLAG_FASTWRITE, &zb);
+ prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_FASTWRITE, &zb);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
lwb->lwb_state = LWB_STATE_OPENED;
zil_lwb_set_zio_dependency(zilog, lwb);
zilog->zl_last_lwb_opened = lwb;
}
mutex_exit(&zilog->zl_lock);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}
/*
* Define a limited set of intent log block sizes.
*
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
*/
struct {
uint64_t limit;
uint64_t blksz;
} zil_block_buckets[] = {
{ 4096, 4096 }, /* non TX_WRITE */
{ 8192 + 4096, 8192 + 4096 }, /* database */
{ 32768 + 4096, 32768 + 4096 }, /* NFS writes */
{ 65536 + 4096, 65536 + 4096 }, /* 64KB writes */
{ 131072, 131072 }, /* < 128KB writes */
{ 131072 + 4096, 65536 + 4096 }, /* 128KB writes */
{ UINT64_MAX, SPA_OLD_MAXBLOCKSIZE }, /* > 128KB writes */
};
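/*
* Illustrative example of the bucket selection done in
* zil_lwb_write_issue(): with roughly 20KB of pending log data
* (zl_cur_used plus the zil_chain_t header), the first bucket whose
* limit is not exceeded is the 32KB + 4KB entry, so the next log block
* is allocated at 36KB, of which only the used portion is actually
* written.
*/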
/*
* Maximum block size used by the ZIL. This is picked up when the ZIL is
* initialized. Otherwise this should not be used directly; see
* zl_max_block_size instead.
*/
int zil_maxblocksize = SPA_OLD_MAXBLOCKSIZE;
/*
* Start a log block write and advance to the next log block.
* Calls are serialized.
*/
static lwb_t *
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *nlwb = NULL;
zil_chain_t *zilc;
spa_t *spa = zilog->zl_spa;
blkptr_t *bp;
dmu_tx_t *tx;
uint64_t txg;
uint64_t zil_blksz, wsz;
int i, error;
boolean_t slog;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
zilc = (zil_chain_t *)lwb->lwb_buf;
bp = &zilc->zc_next_blk;
} else {
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
bp = &zilc->zc_next_blk;
}
ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
/*
* Allocate the next block and save its address in this block
* before writing it in order to establish the log chain.
* Note that if the allocation of nlwb synced before we wrote
* the block that points at it (lwb), we'd leak it if we crashed.
* Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
* We dirty the dataset to ensure that zil_sync() will be called
* to clean up in the event of allocation failure or I/O failure.
*/
tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
lwb->lwb_tx = tx;
/*
* Log blocks are pre-allocated. Here we select the size of the next
* block, based on size used in the last block.
* - first find the smallest bucket that will fit the block from a
* limited set of block sizes. This is because it's faster to write
* blocks allocated from the same metaslab as they are adjacent or
* close.
* - next find the maximum from the new suggested size and an array of
* previous sizes. This lessens a picket fence effect of wrongly
* guessing the size if we have a stream of say 2k, 64k, 2k, 64k
* requests.
*
* Note we only write what is used, but we can't just allocate
* the maximum block size because we can exhaust the available
* pool log space.
*/
zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
for (i = 0; zil_blksz > zil_block_buckets[i].limit; i++)
continue;
zil_blksz = MIN(zil_block_buckets[i].blksz, zilog->zl_max_block_size);
zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
for (i = 0; i < ZIL_PREV_BLKS; i++)
zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
BP_ZERO(bp);
error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog);
if (slog) {
ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
} else {
ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
}
if (error == 0) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
/*
* Allocate a new log write block (lwb).
*/
nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE);
}
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
ASSERT3U(wsz, <=, lwb->lwb_sz);
zio_shrink(lwb->lwb_write_zio, wsz);
} else {
wsz = lwb->lwb_sz;
}
zilc->zc_pad = 0;
zilc->zc_nused = lwb->lwb_nused;
zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
/*
* clear unused data for security
*/
bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
lwb->lwb_issued_timestamp = gethrtime();
lwb->lwb_state = LWB_STATE_ISSUED;
zio_nowait(lwb->lwb_root_zio);
zio_nowait(lwb->lwb_write_zio);
/*
* If there was an allocation failure then nlwb will be null which
* forces a txg_wait_synced().
*/
return (nlwb);
}
/*
* Maximum amount of write data that can be put into single log block.
*/
uint64_t
zil_max_log_data(zilog_t *zilog)
{
return (zilog->zl_max_block_size -
sizeof (zil_chain_t) - sizeof (lr_write_t));
}
/*
* Maximum amount of log space we agree to waste to reduce number of
* WR_NEED_COPY chunks to reduce zl_get_data() overhead (~12%).
*/
static inline uint64_t
zil_max_waste_space(zilog_t *zilog)
{
return (zil_max_log_data(zilog) / 8);
}
/*
* Maximum amount of write data for WR_COPIED. For correctness, consumers
* must fall back to WR_NEED_COPY if we can't fit the entire record into one
* maximum sized log block, because each WR_COPIED record must fit in a
* single log block. For space efficiency, we want to fit two records into a
* max-sized log block.
*/
uint64_t
zil_max_copied_data(zilog_t *zilog)
{
return ((zilog->zl_max_block_size - sizeof (zil_chain_t)) / 2 -
sizeof (lr_write_t));
}
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
lr_t *lrcb, *lrc;
lr_write_t *lrwb, *lrw;
char *lr_buf;
uint64_t dlen, dnow, dpad, lwb_sp, reclen, txg, max_log_data;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
lrc = &itx->itx_lr;
lrw = (lr_write_t *)lrc;
/*
* A commit itx doesn't represent any on-disk state; instead
* it's simply used as a place holder on the commit list, and
* provides a mechanism for attaching a "commit waiter" onto the
* correct lwb (such that the waiter can be signalled upon
* completion of that lwb). Thus, we don't process this itx's
* log record if it's a commit itx (these itx's don't have log
* records), and instead link the itx's waiter onto the lwb's
* list of waiters.
*
* For more details, see the comment above zil_commit().
*/
if (lrc->lrc_txtype == TX_COMMIT) {
mutex_enter(&zilog->zl_lock);
zil_commit_waiter_link_lwb(itx->itx_private, lwb);
itx->itx_private = NULL;
mutex_exit(&zilog->zl_lock);
return (lwb);
}
if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
dpad = dlen - lrw->lr_length;
} else {
dlen = dpad = 0;
}
reclen = lrc->lrc_reclen;
zilog->zl_cur_used += (reclen + dlen);
txg = lrc->lrc_txg;
ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));
cont:
/*
* If this record won't fit in the current log block, start a new one.
* For WR_NEED_COPY optimize layout for minimal number of chunks.
*/
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
max_log_data = zil_max_log_data(zilog);
if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
lwb_sp < zil_max_waste_space(zilog) &&
(dlen % max_log_data == 0 ||
lwb_sp < reclen + dlen % max_log_data))) {
lwb = zil_lwb_write_issue(zilog, lwb);
if (lwb == NULL)
return (NULL);
zil_lwb_write_open(zilog, lwb);
ASSERT(LWB_EMPTY(lwb));
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
/*
* There must be enough space in the new, empty log block to
* hold reclen. For WR_COPIED, we need to fit the whole
* record in one block, and reclen is the header size + the
* data size. For WR_NEED_COPY, we can create multiple
* records, splitting the data into multiple blocks, so we
* only need to fit one word of data per block; in this case
* reclen is just the header size (no data).
*/
ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
}
dnow = MIN(dlen, lwb_sp - reclen);
lr_buf = lwb->lwb_buf + lwb->lwb_nused;
bcopy(lrc, lr_buf, reclen);
lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */
lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */
ZIL_STAT_BUMP(zil_itx_count);
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lrc->lrc_txtype == TX_WRITE) {
if (txg > spa_freeze_txg(zilog->zl_spa))
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (itx->itx_wr_state == WR_COPIED) {
ZIL_STAT_BUMP(zil_itx_copied_count);
ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
} else {
char *dbuf;
int error;
if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
lrcb->lrc_reclen += dnow;
if (lrwb->lr_length > dnow)
lrwb->lr_length = dnow;
lrw->lr_offset += dnow;
lrw->lr_length -= dnow;
ZIL_STAT_BUMP(zil_itx_needcopy_count);
ZIL_STAT_INCR(zil_itx_needcopy_bytes, dnow);
} else {
ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zil_itx_indirect_count);
ZIL_STAT_INCR(zil_itx_indirect_bytes,
lrw->lr_length);
}
/*
* We pass in the "lwb_write_zio" rather than
* "lwb_root_zio" so that the "lwb_write_zio"
* becomes the parent of any zio's created by
* the "zl_get_data" callback. The vdevs are
* flushed after the "lwb_write_zio" completes,
* so we want to make sure that completion
* callback waits for these additional zio's,
* such that the vdevs used by those zio's will
* be included in the lwb's vdev tree, and those
* vdevs will be properly flushed. If we passed
* in "lwb_root_zio" here, then these additional
* vdevs may not be flushed; e.g. if these zio's
* completed after "lwb_write_zio" completed.
*/
error = zilog->zl_get_data(itx->itx_private,
itx->itx_gen, lrwb, dbuf, lwb,
lwb->lwb_write_zio);
if (dbuf != NULL && error == 0 && dnow == dlen)
/* Zero any padding bytes in the last block. */
bzero((char *)dbuf + lrwb->lr_length, dpad);
if (error == EIO) {
txg_wait_synced(zilog->zl_dmu_pool, txg);
return (lwb);
}
if (error != 0) {
ASSERT(error == ENOENT || error == EEXIST ||
error == EALREADY);
return (lwb);
}
}
}
/*
* We're actually making an entry, so update lrc_seq to be the
* log record sequence number. Note that this is generally not
* equal to the itx sequence number because not all transactions
* are synchronous, and sometimes spa_sync() gets there first.
*/
lrcb->lrc_seq = ++zilog->zl_lr_seq;
lwb->lwb_nused += reclen + dnow;
zil_lwb_add_txg(lwb, txg);
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
dlen -= dnow;
if (dlen > 0) {
zilog->zl_cur_used += reclen;
goto cont;
}
return (lwb);
}
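/*
* Allocate an in-memory intent log transaction (itx). The log record
* size is rounded up to an 8-byte boundary and any padding introduced
* by the rounding is zeroed.
*/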
itx_t *
zil_itx_create(uint64_t txtype, size_t olrsize)
{
size_t itxsize, lrsize;
itx_t *itx;
lrsize = P2ROUNDUP_TYPED(olrsize, sizeof (uint64_t), size_t);
itxsize = offsetof(itx_t, itx_lr) + lrsize;
itx = zio_data_buf_alloc(itxsize);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
bzero((char *)&itx->itx_lr + olrsize, lrsize - olrsize);
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
itx->itx_size = itxsize;
return (itx);
}
void
zil_itx_destroy(itx_t *itx)
{
IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
itx->itx_callback(itx->itx_callback_data);
zio_data_buf_free(itx, itx->itx_size);
}
/*
* Free up the sync and async itxs. The itxs_t has already been detached
* so no locks are needed.
*/
static void
zil_itxg_clean(void *arg)
{
itx_t *itx;
list_t *list;
avl_tree_t *t;
void *cookie;
itxs_t *itxs = arg;
itx_async_node_t *ian;
list = &itxs->i_sync_list;
while ((itx = list_head(list)) != NULL) {
/*
* In the general case, commit itxs will not be found
* here, as they'll be committed to an lwb via
* zil_lwb_commit(), and free'd in that function. Having
* said that, it is still possible for commit itxs to be
* found here, due to the following race:
*
* - a thread calls zil_commit() which assigns the
* commit itx to a per-txg i_sync_list
* - zil_itxg_clean() is called (e.g. via spa_sync())
* while the waiter is still on the i_sync_list
*
* There's nothing to prevent syncing the txg while the
* waiter is on the i_sync_list. This normally doesn't
* happen because spa_sync() is slower than zil_commit(),
* but if zil_commit() calls txg_wait_synced() (e.g.
* because zil_create() or zil_commit_writer_stall() is
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
list_remove(list, itx);
zil_itx_destroy(itx);
}
cookie = NULL;
t = &itxs->i_async_tree;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_head(list)) != NULL) {
list_remove(list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
}
avl_destroy(t);
kmem_free(itxs, sizeof (itxs_t));
}
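/*
* AVL comparator for the per-txg i_async_tree: orders itx_async_node_t
* entries by object id (ia_foid).
*/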
static int
zil_aitx_compare(const void *x1, const void *x2)
{
const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
return (TREE_CMP(o1, o2));
}
/*
* Remove all async itx with the given oid.
*/
void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
list_t clean_list;
itx_t *itx;
ASSERT(oid != 0);
list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* Locate the object node and append its list.
*/
t = &itxg->itxg_itxs->i_async_tree;
ian = avl_find(t, &oid, &where);
if (ian != NULL)
list_move_tail(&clean_list, &ian->ia_list);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_head(&clean_list)) != NULL) {
list_remove(&clean_list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(&clean_list);
}
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
uint64_t txg;
itxg_t *itxg;
itxs_t *itxs, *clean = NULL;
/*
* Ensure the data of a renamed file is committed before the rename.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
itxs = itxg->itxg_itxs;
if (itxg->itxg_txg != txg) {
if (itxs != NULL) {
/*
* The zil_clean callback hasn't got around to cleaning
* this itxg. Save the itxs for release below.
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
"txg %llu", (u_longlong_t)itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_SLEEP);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
avl_create(&itxs->i_async_tree, zil_aitx_compare,
sizeof (itx_async_node_t),
offsetof(itx_async_node_t, ia_node));
}
if (itx->itx_sync) {
list_insert_tail(&itxs->i_sync_list, itx);
} else {
avl_tree_t *t = &itxs->i_async_tree;
uint64_t foid =
LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
itx_async_node_t *ian;
avl_index_t where;
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_SLEEP);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
avl_insert(t, ian, where);
}
list_insert_tail(&ian->ia_list, itx);
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
/*
* We don't want to dirty the ZIL using ZILTEST_TXG, because
* zil_clean() will never be called using ZILTEST_TXG. Thus, we
* need to be careful to always dirty the ZIL using the "real"
* TXG (not itxg_txg) even when the SPA is frozen.
*/
zilog_dirty(zilog, dmu_tx_get_txg(tx));
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
if (clean != NULL)
zil_itxg_clean(clean);
}
/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
* have written out the uberblocks (i.e. txg has been committed) so that
* we don't inadvertently clean out in-memory log records that would be required
* by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxs_t *clean_me;
ASSERT3U(synced_txg, <, ZILTEST_TXG);
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
return;
}
ASSERT3U(itxg->itxg_txg, <=, synced_txg);
ASSERT3U(itxg->itxg_txg, !=, 0);
clean_me = itxg->itxg_itxs;
itxg->itxg_itxs = NULL;
itxg->itxg_txg = 0;
mutex_exit(&itxg->itxg_lock);
/*
* Preferably start a task queue to free up the old itxs but
* if taskq_dispatch can't allocate resources to do that then
* free it in-line. This should be rare. Note, using TQ_SLEEP
* created a bad performance problem.
*/
ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
zil_itxg_clean, clean_me, TQ_NOSLEEP);
if (id == TASKQID_INVALID)
zil_itxg_clean(clean_me);
}
/*
* This function will traverse the queue of itxs that need to be
* committed, and move them onto the ZIL's zl_itx_commit_list.
*/
static void
zil_get_commit_list(zilog_t *zilog)
{
uint64_t otxg, txg;
list_t *commit_list = &zilog->zl_itx_commit_list;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing. That's okay since we'll
* only commit things in the future.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If we're adding itx records to the zl_itx_commit_list,
* then the zil better be dirty in this "txg". We can assert
* that here since we're holding the itxg_lock which will
* prevent spa_sync from cleaning it. Once we add the itxs
* to the zl_itx_commit_list we must commit it to disk even
* if it's unnecessary (i.e. the txg was synced).
*/
ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
mutex_exit(&itxg->itxg_lock);
}
}
/*
* Move the async itxs for a specified object to commit into sync lists.
*/
void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If a foid is specified then find that node and append its
* list. Otherwise walk the tree appending all the lists
* to the sync list. We add to the end rather than the
* beginning to ensure the create has happened.
*/
t = &itxg->itxg_itxs->i_async_tree;
if (foid != 0) {
ian = avl_find(t, &foid, &where);
if (ian != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
}
} else {
void *cookie = NULL;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
list_destroy(&ian->ia_list);
kmem_free(ian, sizeof (itx_async_node_t));
}
}
mutex_exit(&itxg->itxg_lock);
}
}
/*
* This function will prune commit itxs that are at the head of the
* commit list (it won't prune past the first non-commit itx), and
* either: a) attach them to the last lwb that's still pending
* completion, or b) skip them altogether.
*
* This is used as a performance optimization to prevent commit itxs
* from generating new lwbs when it's unnecessary to do so.
*/
static void
zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
if (lrc->lrc_txtype != TX_COMMIT)
break;
mutex_enter(&zilog->zl_lock);
lwb_t *last_lwb = zilog->zl_last_lwb_opened;
if (last_lwb == NULL ||
last_lwb->lwb_state == LWB_STATE_FLUSH_DONE) {
/*
* All of the itxs this waiter was waiting on
* must have already completed (or there were
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
zil_commit_waiter_skip(itx->itx_private);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
itx->itx_private = NULL;
}
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
zil_itx_destroy(itx);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
/*
* When zio_alloc_zil() fails to allocate the next lwb block on
* disk, we must call txg_wait_synced() to ensure all of the
* lwbs in the zilog's zl_lwb_list are synced and then freed (in
* zil_sync()), such that any subsequent ZIL writer (i.e. a call
* to zil_process_commit_list()) will have to call zil_create(),
* and start a new ZIL chain.
*
* Since zio_alloc_zil() failed, the lwb that was previously
* issued does not have a pointer to the "next" lwb on disk.
* Thus, if another ZIL writer thread was to allocate the "next"
* on-disk lwb, that block could be leaked in the event of a
* crash (because the previous lwb on-disk would not point to
* it).
*
* We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwb's in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
}
/*
* This function will traverse the commit list, creating new lwbs as
* needed, and committing the itxs from the commit list to these newly
* created lwbs. Additionally, as a new lwb is created, the previous
* lwb will be issued to the zio layer to be written to disk.
*/
static void
zil_process_commit_list(zilog_t *zilog)
{
spa_t *spa = zilog->zl_spa;
list_t nolwb_itxs;
list_t nolwb_waiters;
lwb_t *lwb;
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
* calling zil_create().
*/
if (list_head(&zilog->zl_itx_commit_list) == NULL)
return;
list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL) {
lwb = zil_create(zilog);
} else {
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
}
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
uint64_t txg = lrc->lrc_txg;
ASSERT3U(txg, !=, 0);
if (lrc->lrc_txtype == TX_COMMIT) {
DTRACE_PROBE2(zil__process__commit__itx,
zilog_t *, zilog, itx_t *, itx);
} else {
DTRACE_PROBE2(zil__process__normal__itx,
zilog_t *, zilog, itx_t *, itx);
}
list_remove(&zilog->zl_itx_commit_list, itx);
boolean_t synced = txg <= spa_last_synced_txg(spa);
boolean_t frozen = txg > spa_freeze_txg(spa);
/*
* If the txg of this itx has already been synced out, then
* we don't need to commit this itx to an lwb. This is
* because the data of this itx will have already been
* written to the main pool. This is inherently racy, and
* it's still ok to commit an itx whose txg has already
* been synced; this will result in a write that's
* unnecessary, but will do no harm.
*
* With that said, we always want to commit TX_COMMIT itxs
* to an lwb, regardless of whether or not that itx's txg
* has been synced out. We do this to ensure any OPENED lwb
* will always have at least one zil_commit_waiter_t linked
* to the lwb.
*
* As a counter-example, if we skipped TX_COMMIT itx's
* whose txg had already been synced, the following
* situation could occur if we happened to be racing with
* spa_sync:
*
* 1. We commit a non-TX_COMMIT itx to an lwb, where the
* itx's txg is 10 and the last synced txg is 9.
* 2. spa_sync finishes syncing out txg 10.
* 3. We move to the next itx in the list, it's a TX_COMMIT
* whose txg is 10, so we skip it rather than committing
* it to the lwb used in (1).
*
* If the itx that is skipped in (3) is the last TX_COMMIT
* itx in the commit list, then it's possible for the lwb
* used in (1) to remain in the OPENED state indefinitely.
*
* To prevent the above scenario from occurring, ensuring
* that once an lwb is OPENED it will transition to ISSUED
* and eventually DONE, we always commit TX_COMMIT itx's to
* an lwb here, even if that itx's txg has already been
* synced.
*
* Finally, if the pool is frozen, we _always_ commit the
* itx. The point of freezing the pool is to prevent data
* from being written to the main pool via spa_sync, and
* instead rely solely on the ZIL to persistently store the
* data; i.e. when the pool is frozen, the last synced txg
* value can't be trusted.
*/
if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
if (lwb != NULL) {
lwb = zil_lwb_commit(zilog, itx, lwb);
if (lwb == NULL)
list_insert_tail(&nolwb_itxs, itx);
else
list_insert_tail(&lwb->lwb_itxs, itx);
} else {
if (lrc->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_nolwb(
itx->itx_private, &nolwb_waiters);
}
list_insert_tail(&nolwb_itxs, itx);
}
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
}
if (lwb == NULL) {
/*
* This indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
* waiters as "done" here, since without an lwb, we
* can't do this via zil_lwb_flush_vdevs_done() like
* normal.
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_head(&nolwb_waiters)) != NULL) {
zil_commit_waiter_skip(zcw);
list_remove(&nolwb_waiters, zcw);
}
/*
* And finally, we have to destroy the itx's that
* couldn't be committed to an lwb; this will also call
* the itx's callback if one exists for the itx.
*/
while ((itx = list_head(&nolwb_itxs)) != NULL) {
list_remove(&nolwb_itxs, itx);
zil_itx_destroy(itx);
}
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_WRITE_DONE);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_FLUSH_DONE);
/*
* At this point, the ZIL block pointed at by the "lwb"
* variable is in one of the following states: "closed"
* or "open".
*
* If it's "closed", then no itxs have been committed to
* it, so there's no point in issuing its zio (i.e. it's
* "empty").
*
* If it's "open", then it contains one or more itxs that
* eventually need to be committed to stable storage. In
* this case we intentionally do not issue the lwb's zio
* to disk yet, and instead rely on one of the following
* two mechanisms for issuing the zio:
*
* 1. Ideally, there will be more ZIL activity occurring
* on the system, such that this function will be
* immediately called again (not necessarily by the same
* thread) and this lwb's zio will be issued via
* zil_lwb_commit(). This way, the lwb is guaranteed to
* be "full" when it is issued to disk, and we'll make
* use of the lwb's size the best we can.
*
* 2. If there isn't sufficient ZIL activity occurring on
* the system, such that this lwb's zio isn't issued via
* zil_lwb_commit(), zil_commit_waiter() will issue the
* lwb's zio. If this occurs, the lwb is not guaranteed
* to be "full" by the time its zio is issued, and means
* the size of the lwb was "too large" given the amount
* of ZIL activity occurring on the system at that time.
*
* We do this for a couple of reasons:
*
* 1. To try and reduce the number of IOPs needed to
* write the same number of itxs. If an lwb has space
* available in its buffer for more itxs, and more itxs
* will be committed relatively soon (relative to the
* latency of performing a write), then it's beneficial
* to wait for these "next" itxs. This way, more itxs
* can be committed to stable storage with fewer writes.
*
* 2. To try and use the largest lwb block size that the
* incoming rate of itxs can support. Again, this is to
* try and pack as many itxs into as few lwbs as
* possible, without significantly impacting the latency
* of each individual itx.
*/
}
}
/*
* This function is responsible for ensuring the passed in commit waiter
* (and associated commit itx) is committed to an lwb. If the waiter is
* not already committed to an lwb, all itxs in the zilog's queue of
* itxs will be processed. The assumption is the passed in waiter's
* commit itx will be found in the queue just like the other non-commit
* itxs, such that when the entire queue is processed, the waiter will
* have been committed to an lwb.
*
* The lwb associated with the passed in waiter is not guaranteed to
* have been issued by the time this function completes. If the lwb is
* not issued, we rely on future calls to zil_commit_writer() to issue
* the lwb, or the timeout mechanism found in zil_commit_waiter().
*/
static void
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
* the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
* "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
* We've measured this optimization to reduce the CPU time spent
* contending on this lock by up to 5%, using a system
* with 32 CPUs, low latency storage (~50 usec writes),
* and 1024 threads performing sync writes.
*/
goto out;
}
ZIL_STAT_BUMP(zil_commit_writer_count);
zil_get_commit_list(zilog);
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog);
out:
mutex_exit(&zilog->zl_issuer_lock);
}
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
lwb_t *lwb = zcw->zcw_lwb;
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
/*
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
* do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE)
return;
/*
* In order to call zil_lwb_write_issue() we must hold the
* zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
* Since we just dropped and re-acquired the commit waiter's
* lock, we have to re-check to see if the waiter was marked
* "done" during that process. If the waiter was marked "done",
* the "lwb" pointer is no longer valid (it can be free'd after
* the waiter is marked "done"), so without this check we could
* wind up with a use-after-free error below.
*/
if (zcw->zcw_done)
goto out;
ASSERT3P(lwb, ==, zcw->zcw_lwb);
/*
* We've already checked this above, but since we hadn't acquired
* the zilog's zl_issuer_lock, we have to perform this check a
* second time while holding the lock.
*
* We don't need to hold the zl_lock since the lwb cannot transition
* from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb
* _can_ transition from ISSUED to DONE, but it's OK to race with
* that transition since we treat the lwb the same, whether it's in
* the ISSUED or DONE states.
*
* The important thing is that we treat the lwb differently depending
* on whether it's ISSUED or OPENED, and block any other threads that might
* attempt to issue this lwb. For that reason we hold the
* zl_issuer_lock when checking the lwb_state; we must not call
* zil_lwb_write_issue() if the lwb had already been issued.
*
* See the comment above the lwb_state_t structure definition for
* more details on the lwb states, and locking requirements.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE)
goto out;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
/*
* As described in the comments above zil_commit_waiter() and
* zil_process_commit_list(), we need to issue this lwb's zio
* since we've reached the commit waiter's timeout and it still
* hasn't been issued.
*/
lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
IMPLY(nlwb != NULL, lwb->lwb_state != LWB_STATE_OPENED);
/*
* Since the lwb's zio hadn't been issued by the time this thread
* reached its timeout, we reset the zilog's "zl_cur_used" field
* to influence the zil block size selection algorithm.
*
* By having to issue the lwb's zio here, it means the size of the
* lwb was too large, given the incoming throughput of itxs. By
* setting "zl_cur_used" to zero, we communicate this fact to the
* block size selection algorithm, so it can take this information
* into account, and potentially select a smaller size for the
* next lwb block that is allocated.
*/
zilog->zl_cur_used = 0;
if (nlwb == NULL) {
/*
* When zil_lwb_write_issue() returns NULL, this
* indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this occurs, the ZIL write
* pipeline must be stalled; see the comment within the
* zil_commit_writer_stall() function for more details.
*
* We must drop the commit waiter's lock prior to
* calling zil_commit_writer_stall() or else we can wind
* up with the following deadlock:
*
* - This thread is waiting for the txg to sync while
* holding the waiter's lock; txg_wait_synced() is
* used within zil_commit_writer_stall().
*
* - The txg can't sync because it is waiting for this
* lwb's zio callback to call dmu_tx_commit().
*
* - The lwb's zio callback can't call dmu_tx_commit()
* because it's blocked trying to acquire the waiter's
* lock, which occurs prior to calling dmu_tx_commit()
*/
mutex_exit(&zcw->zcw_lock);
zil_commit_writer_stall(zilog);
mutex_enter(&zcw->zcw_lock);
}
out:
mutex_exit(&zilog->zl_issuer_lock);
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}
/*
* This function is responsible for performing the following two tasks:
*
* 1. its primary responsibility is to block until the given "commit
* waiter" is considered "done".
*
* 2. its secondary responsibility is to issue the zio for the lwb that
* the given "commit waiter" is waiting on, if this function has
* waited "long enough" and the lwb is still in the "open" state.
*
* Given a sufficient amount of itxs being generated and written using
* the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
* function. If this does not occur, this secondary responsibility will
* ensure the lwb is issued even if there is no other synchronous
* activity on the system.
*
* For more details, see zil_process_commit_list(); more specifically,
* the comment at the bottom of that function.
*/
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zcw->zcw_lock);
/*
* The timeout is scaled based on the lwb latency to avoid
* significantly impacting the latency of each individual itx.
* For more details, see the comment at the bottom of the
* zil_process_commit_list() function.
*/
int pct = MAX(zfs_commit_timeout_pct, 1);
hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
hrtime_t wakeup = gethrtime() + sleep;
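/*
 * For example, assuming zfs_commit_timeout_pct is 5 and the last
 * lwb completed in 1 ms (zl_last_lwb_latency == 1,000,000 ns), the
 * computed sleep is 50,000 ns; i.e. this waiter will only wait
 * roughly 50 usec before taking on the job of issuing the lwb itself.
 */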
boolean_t timedout = B_FALSE;
while (!zcw->zcw_done) {
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
lwb_t *lwb = zcw->zcw_lwb;
/*
* Usually, the waiter will have a non-NULL lwb field here,
* but it's possible for it to be NULL as a result of
* zil_commit() racing with spa_sync().
*
* When zil_clean() is called, it's possible for the itxg
* list (which may be cleaned via a taskq) to contain
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
* marked done until zil_commit_waiter_skip() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
* "zcw" variable) to be found in this "in between" state;
* where it's "zcw_lwb" field is NULL, and it hasn't yet
* been skipped, so it's "zcw_done" field is still B_FALSE.
*/
IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
ASSERT3B(timedout, ==, B_FALSE);
/*
* If the lwb hasn't been issued yet, then we
* need to wait with a timeout, in case this
* function needs to issue the lwb after the
* timeout is reached; responsibility (2) from
* the comment above this function.
*/
int rc = cv_timedwait_hires(&zcw->zcw_cv,
&zcw->zcw_lock, wakeup, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
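/*
 * cv_timedwait_hires() returns -1 when the absolute deadline
 * passes without the cv being signalled; any other return value
 * means we were woken up, so loop around and re-check zcw_done
 * before treating this as a timeout.
 */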
if (rc != -1 || zcw->zcw_done)
continue;
timedout = B_TRUE;
zil_commit_waiter_timeout(zilog, zcw);
if (!zcw->zcw_done) {
/*
* If the commit waiter has already been
* marked "done", it's possible for the
* waiter's lwb structure to have already
* been freed. Thus, we can only reliably
* make these assertions if the waiter
* isn't done.
*/
ASSERT3P(lwb, ==, zcw->zcw_lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
}
} else {
/*
* If the lwb isn't open, then it must have already
* been issued. In that case, there's no need to
* use a timeout when waiting for the lwb to
* complete.
*
* Additionally, if the lwb is NULL, the waiter
* will soon be signaled and marked done via
* zil_clean() and zil_itxg_clean(), so no timeout
* is required.
*/
IMPLY(lwb != NULL,
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_WRITE_DONE ||
lwb->lwb_state == LWB_STATE_FLUSH_DONE);
cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
}
}
mutex_exit(&zcw->zcw_lock);
}
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
zcw->zcw_zio_error = 0;
return (zcw);
}
static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
kmem_cache_free(zil_zcw_cache, zcw);
}
/*
* This function is used to create a TX_COMMIT itx and assign it. This
* way, it will be linked into the ZIL's list of synchronous itxs, and
* then later committed to an lwb (or skipped) when
* zil_process_commit_list() is called.
*/
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
itx->itx_private = zcw;
zil_itx_assign(zilog, itx, tx);
dmu_tx_commit(tx);
}
/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
* ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
* itxs can be committed to a single lwb. Once an lwb is written and
* committed to stable storage (i.e. the lwb is written, and vdevs have
* been flushed), each itx that was committed to that lwb is also
* considered to be committed to stable storage.
*
* When an itx is committed to an lwb, the log record (lr_t) contained
* by the itx is copied into the lwb's zio buffer, and once this buffer
* is written to disk, it becomes an on-disk ZIL block.
*
* As itxs are generated, they're inserted into the ZIL's queue of
* uncommitted itxs. The semantics of zil_commit() are such that it will
* block until all itxs that were in the queue when it was called, are
* committed to stable storage.
*
* If "foid" is zero, this means all "synchronous" and "asynchronous"
* itxs, for all objects in the dataset, will be committed to stable
* storage prior to zil_commit() returning. If "foid" is non-zero, all
* "synchronous" itxs for all objects, but only "asynchronous" itxs
* that correspond to the foid passed in, will be committed to stable
* storage prior to zil_commit() returning.
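*
* For example, a hypothetical fsync(2)-style caller would pass the
* file's object number as "foid", so that only that object's "async"
* itxs are forced out (along with all "sync" itxs), whereas a caller
* that needs everything durable simply passes a foid of zero.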
*
* Generally speaking, when zil_commit() is called, the consumer doesn't
* actually care about _all_ of the uncommitted itxs. Instead, they're
* simply trying to wait for a specific itx to be committed to disk,
* but the interface(s) for interacting with the ZIL don't allow such
* fine-grained communication. A better interface would allow a consumer
* to create and assign an itx, and then pass a reference to this itx to
* zil_commit(); such that zil_commit() would return as soon as that
* specific itx was committed to disk (instead of waiting for _all_
* itxs to be committed).
*
* When a thread calls zil_commit() a special "commit itx" will be
* generated, along with a corresponding "waiter" for this commit itx.
* zil_commit() will wait on this waiter's CV, such that when the waiter
* is marked done, and signaled, zil_commit() will return.
*
* This commit itx is inserted into the queue of uncommitted itxs. This
* provides an easy mechanism for determining which itxs were in the
* queue prior to zil_commit() having been called, and which itxs were
* added after zil_commit() was called.
*
* The commit itx is special; it doesn't have any on-disk representation.
* When a commit itx is "committed" to an lwb, the waiter associated
* with it is linked onto the lwb's list of waiters. Then, when that lwb
* completes, each waiter on the lwb's list is marked done and signaled
* -- allowing the thread waiting on the waiter to return from zil_commit().
*
* It's important to point out a few critical factors that allow us
* to make use of the commit itxs, commit waiters, per-lwb lists of
* commit waiters, and zio completion callbacks like we're doing:
*
* 1. The list of waiters for each lwb is traversed, and each commit
* waiter is marked "done" and signaled, in the zio completion
* callback of the lwb's zio[*].
*
* * Actually, the waiters are signaled in the zio completion
* callback of the root zio for the DKIOCFLUSHWRITECACHE commands
* that are sent to the vdevs upon completion of the lwb zio.
*
* 2. When the itxs are inserted into the ZIL's queue of uncommitted
* itxs, the order in which they are inserted is preserved[*]; as
* itxs are added to the queue, they are added to the tail of
* in-memory linked lists.
*
* When committing the itxs to lwbs (to be written to disk), they
* are committed in the same order in which the itxs were added to
* the uncommitted queue's linked list(s); i.e. the linked list of
* itxs to commit is traversed from head to tail, and each itx is
* committed to an lwb in that order.
*
* * To clarify:
*
* - the order of "sync" itxs is preserved w.r.t. other
* "sync" itxs, regardless of the corresponding objects.
* - the order of "async" itxs is preserved w.r.t. other
* "async" itxs corresponding to the same object.
* - the order of "async" itxs is *not* preserved w.r.t. other
* "async" itxs corresponding to different objects.
* - the order of "sync" itxs w.r.t. "async" itxs (or vice
* versa) is *not* preserved, even for itxs that correspond
* to the same object.
*
* For more details, see: zil_itx_assign(), zil_async_to_sync(),
* zil_get_commit_list(), and zil_process_commit_list().
*
* 3. The lwbs represent a linked list of blocks on disk. Thus, any
* lwb cannot be considered committed to stable storage, until its
* "previous" lwb is also committed to stable storage. This fact,
* coupled with the fact described above, means that itxs are
* committed in (roughly) the order in which they were generated.
* This is essential because itxs are dependent on prior itxs.
* Thus, we *must not* deem an itx as being committed to stable
* storage, until *all* prior itxs have also been committed to
* stable storage.
*
* To enforce this ordering of lwb zio's, while still leveraging as
* much of the underlying storage performance as possible, we rely
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
* the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
*
* By relying on this parent-child zio relationship, we can have
* many lwb zio's concurrently issued to the underlying storage,
* but the order in which they complete will be the same order in
* which they were created.
*/
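/*
 * A minimal usage sketch (the txtype, record size, and payload names
 * below are illustrative only):
 *
 *	itx_t *itx = zil_itx_create(txtype, sizeof (lr_t) + payload_len);
 *	... fill in itx->itx_lr ...
 *	zil_itx_assign(zilog, itx, tx);
 *	...
 *	zil_commit(zilog, foid);	(blocks until the itx is stable)
 */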
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
*
* 1. A snapshot may never be modified, thus it cannot have any
* in-flight itxs that would have modified the dataset.
*
* 2. By design, when zil_commit() is called, a commit itx will
* be assigned to this zilog; as a result, the zilog will be
* dirtied. We must not dirty the zilog of a snapshot; there are
* checks in the code that enforce this invariant, and they will
* cause a panic if it's not upheld.
*/
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
if (!spa_writeable(zilog->zl_spa)) {
/*
* If the SPA is not writable, there should never be any
* pending itxs waiting to be committed to disk. If that
* weren't true, we'd skip writing those itxs out, and
* would break the semantics of zil_commit(); thus, we're
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
return;
}
/*
* If the ZIL is suspended, we don't want to dirty it by calling
* zil_commit_itx_assign() below, nor can we write out
* lwbs as would be done in zil_commit_writer(). Thus, we
* simply rely on txg_wait_synced() to maintain the necessary
* semantics, and avoid calling those functions altogether.
*/
if (zilog->zl_suspend > 0) {
txg_wait_synced(zilog->zl_dmu_pool, 0);
return;
}
zil_commit_impl(zilog, foid);
}
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zil_commit_count);
/*
* Move the "async" itxs for the specified foid to the "sync"
* queues, such that they will be later committed (or skipped)
* to an lwb when zil_process_commit_list() is called.
*
* Since these "async" itxs must be committed prior to this
* call to zil_commit returning, we must perform this operation
* before we call zil_commit_itx_assign().
*/
zil_async_to_sync(zilog, foid);
/*
* We allocate a new "waiter" structure which will initially be
* linked to the commit itx using the itx's "itx_private" field.
* Since the commit itx doesn't represent any on-disk state,
* when it's committed to an lwb, rather than copying its
* lr_t into the lwb's buffer, the commit itx's "waiter" will be
* added to the lwb's list of waiters. Then, when the lwb is
* committed to stable storage, each waiter in the lwb's list of
* waiters will be marked "done", and signalled.
*
* We must create the waiter and assign the commit itx prior to
* calling zil_commit_writer(), or else our specific commit itx
* is not guaranteed to be committed to an lwb prior to calling
* zil_commit_waiter().
*/
zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
zil_commit_itx_assign(zilog, zcw);
zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fall back to
* relying on spa_sync() to write out the data this
* thread is waiting on. Obviously this has performance
* implications, but this is expected to be an exceptional
* case and shouldn't occur often.
*/
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
txg_wait_synced(zilog->zl_dmu_pool, 0);
}
zil_free_commit_waiter(zcw);
}
/*
* Called in syncing context to free committed log blocks and update log header.
*/
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
zil_header_t *zh = zil_header_in_syncing_context(zilog);
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = zilog->zl_spa;
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
lwb_t *lwb;
/*
* We don't zero out zl_destroy_txg, so make sure we don't try
* to destroy it twice.
*/
if (spa_sync_pass(spa) != 1)
return;
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_stop_sync == 0);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
zh->zh_replay_seq = *replayed_seq;
*replayed_seq = 0;
}
if (zilog->zl_destroy_txg == txg) {
blkptr_t blk = zh->zh_log;
ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
bzero(zh, sizeof (zil_header_t));
bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
* If this block was part of a log chain that couldn't
* be claimed because a device was missing during
* zil_claim(), but that device later returns,
* then this block could erroneously appear valid.
* To guard against this, assign a new GUID to the new
* log chain so it doesn't matter what blk points to.
*/
zil_init_log_chain(zilog, &blk);
zh->zh_log = blk;
}
}
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
break;
list_remove(&zilog->zl_lwb_list, lwb);
zio_free(spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
/*
* If we don't have anything left in the lwb list then
* we've had an allocation failure and we need to zero
* out the zil_header blkptr so that we don't end
* up freeing the same block twice.
*/
if (list_head(&zilog->zl_lwb_list) == NULL)
BP_ZERO(&zh->zh_log);
}
/*
* Remove fastwrite on any blocks that have been pre-allocated for
* the next commit. This prevents fastwrite counter pollution by
* unused, long-lived LWBs.
*/
for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) {
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 0;
}
}
mutex_exit(&zilog->zl_lock);
}
/* ARGSUSED */
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
/* ARGSUSED */
static void
zil_lwb_dest(void *vbuf, void *unused)
{
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
}
void
zil_init(void)
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_ksp = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zil_ksp != NULL) {
zil_ksp->ks_data = &zil_stats;
kstat_install(zil_ksp);
}
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_zcw_cache);
kmem_cache_destroy(zil_lwb_cache);
if (zil_ksp != NULL) {
kstat_delete(zil_ksp);
zil_ksp = NULL;
}
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
zilog->zl_sync = sync;
}
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
zilog->zl_logbias = dmu_objset_logbias(os);
zilog->zl_sync = dmu_objset_syncprop(os);
zilog->zl_dirty_max_txg = 0;
zilog->zl_last_lwb_opened = NULL;
zilog->zl_last_lwb_latency = 0;
zilog->zl_max_block_size = zil_maxblocksize;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
return (zilog);
}
void
zil_free(zilog_t *zilog)
{
int i;
zilog->zl_stop_sync = 1;
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
* callback to remove the entry. We remove those here.
*
* Also free up the ziltest itxs.
*/
if (zilog->zl_itxg[i].itxg_itxs)
zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
cv_destroy(&zilog->zl_cv_suspend);
kmem_free(zilog, sizeof (zilog_t));
}
/*
* Open an intent log.
*/
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
zilog_t *zilog = dmu_objset_zil(os);
ASSERT3P(zilog->zl_get_data, ==, NULL);
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
return (zilog);
}
/*
* Close an intent log.
*/
void
zil_close(zilog_t *zilog)
{
lwb_t *lwb;
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
zil_commit(zilog, 0);
} else {
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
ASSERT0(zilog->zl_dirty_max_txg);
ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
}
mutex_enter(&zilog->zl_lock);
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL)
txg = zilog->zl_dirty_max_txg;
else
txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
mutex_exit(&zilog->zl_lock);
/*
* We need to use txg_wait_synced() to wait long enough for the
* ZIL to be clean, and to wait for all pending lwbs to be
* written out.
*/
if (txg != 0)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
zfs_dbgmsg("zil (%px) is dirty, txg %llu", zilog,
(u_longlong_t)txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
zilog->zl_get_data = NULL;
/*
* We should have only one lwb left on the list; remove it now.
*/
mutex_enter(&zilog->zl_lock);
lwb = list_head(&zilog->zl_lwb_list);
if (lwb != NULL) {
ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
}
static char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
* On old version pools, we suspend the log briefly when taking a
* snapshot so that it will have an empty intent log.
*
* Long holds are not really intended to be used the way we do here --
* held for such a short time. A concurrent caller of dsl_dataset_long_held()
* could fail. Therefore we take pains to only put a long hold if it is
* actually necessary. Fortunately, it will only be necessary if the
* objset is currently mounted (or the ZVOL equivalent). In that case it
* will already have a long hold, so we are not really making things any worse.
*
* Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
* zvol_state_t), and use their mechanism to prevent their hold from being
* dropped (e.g. VFS_HOLD()). However, that would be even more pain for
* very little gain.
*
* if cookiep == NULL, this does both the suspend & resume.
* Otherwise, it returns with the dataset "long held", and the cookie
* should be passed into zil_resume().
*/
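/*
 * A minimal usage sketch of the cookie-based form (error handling
 * elided):
 *
 *	void *cookie;
 *	if (zil_suspend(osname, &cookie) == 0) {
 *		... ZIL is empty and the dataset is long held ...
 *		zil_resume(cookie);
 *	}
 */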
int
zil_suspend(const char *osname, void **cookiep)
{
objset_t *os;
zilog_t *zilog;
const zil_header_t *zh;
int error;
error = dmu_objset_hold(osname, suspend_tag, &os);
if (error != 0)
return (error);
zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
zh = zilog->zl_header;
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (SET_ERROR(EBUSY));
}
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
* (i.e. called from zil_vdev_offline()), and there's nothing to do
* for the suspend because it's already suspended, or there's no ZIL.
*/
if (cookiep == NULL && !zilog->zl_suspending &&
(zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (0);
}
dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
zilog->zl_suspend++;
if (zilog->zl_suspend > 1) {
/*
* Someone else is already suspending it.
* Just wait for them to finish.
*/
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
/*
* If there is no pointer to an on-disk block, this ZIL must not
* be active (e.g. filesystem not mounted), so there's nothing
* to clean up.
*/
if (BP_IS_HOLE(&zh->zh_log)) {
ASSERT(cookiep != NULL); /* fast path already handled */
*cookiep = os;
mutex_exit(&zilog->zl_lock);
return (0);
}
/*
* The ZIL has work to do. Ensure that the associated encryption
* key will remain mapped while we are committing the log by
* grabbing a reference to it. If the key isn't loaded we have no
* choice but to return an error until the wrapping key is loaded.
*/
if (os->os_encrypted &&
dsl_dataset_create_key_mapping(dmu_objset_ds(os)) != 0) {
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
return (SET_ERROR(EACCES));
}
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
/*
* We need to use zil_commit_impl to ensure we wait for all
* LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed
* to disk before proceeding. If we used zil_commit instead, it
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_FLUSH_DONE before returning.
*/
zil_commit_impl(zilog, 0);
/*
* Now that we've ensured all lwb's are LWB_STATE_FLUSH_DONE, we
* use txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
if (os->os_encrypted)
dsl_dataset_remove_key_mapping(dmu_objset_ds(os));
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
void
zil_resume(void *cookie)
{
objset_t *os = cookie;
zilog_t *zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
typedef struct zil_replay_arg {
zil_replay_func_t **zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, const lr_t *lr, int error)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog->zl_replaying_seq--; /* didn't actually replay this one */
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS replay transaction error %d, "
"dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
(u_longlong_t)lr->lrc_seq,
(u_longlong_t)(lr->lrc_txtype & ~TX_CI),
(lr->lrc_txtype & TX_CI) ? "CI" : "");
return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, const lr_t *lr, void *zra,
uint64_t claim_txg)
{
zil_replay_arg_t *zr = zra;
const zil_header_t *zh = zilog->zl_header;
uint64_t reclen = lr->lrc_reclen;
uint64_t txtype = lr->lrc_txtype;
int error = 0;
zilog->zl_replaying_seq = lr->lrc_seq;
if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
return (0);
if (lr->lrc_txg < claim_txg) /* already committed */
return (0);
/* Strip case-insensitive bit, still present in log record */
txtype &= ~TX_CI;
if (txtype == 0 || txtype >= TX_MAX_TYPE)
return (zil_replay_error(zilog, lr, EINVAL));
/*
* If this record type can be logged out of order, the object
* (lr_foid) may no longer exist. That's legitimate, not an error.
*/
if (TX_OOO(txtype)) {
error = dmu_object_info(zilog->zl_os,
LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
if (error == ENOENT || error == EEXIST)
return (0);
}
/*
* Make a copy of the data so we can revise and extend it.
*/
bcopy(lr, zr->zr_lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
*/
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
/*
* The log block containing this lr may have been byteswapped
* so that we can easily examine common fields like lrc_txtype.
* However, the log is a mix of different record types, and only the
* replay vectors know how to byteswap their records. Therefore, if
* the lr was byteswapped, undo it before invoking the replay vector.
*/
if (zr->zr_byteswap)
byteswap_uint64_array(zr->zr_lr, reclen);
/*
* We must now do two things atomically: replay this log record,
* and update the log header sequence number to reflect the fact that
* we did so. At the end of each replay function the sequence number
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
* EEXIST. So if we receive any error we try syncing out
* any removes then retry the transaction. Note that we
* specify B_FALSE for byteswap now, so we don't do it twice.
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
}
/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, const blkptr_t *bp, void *arg, uint64_t claim_txg)
{
zilog->zl_replay_blks++;
return (0);
}
/*
* If this dataset has a non-empty intent log, replay it and destroy it.
*/
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zil_replay_arg_t zr;
if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
zil_destroy(zilog, B_TRUE);
return;
}
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_replay = B_FALSE;
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return (B_TRUE);
if (zilog->zl_replay) {
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
zilog->zl_replaying_seq;
return (B_TRUE);
}
return (B_FALSE);
}
/* ARGSUSED */
int
zil_reset(const char *osname, void *arg)
{
int error;
error = zil_suspend(osname, NULL);
/* EACCES means crypto key not loaded */
if ((error == EACCES) || (error == EBUSY))
return (SET_ERROR(error));
if (error != 0)
return (SET_ERROR(EEXIST));
return (0);
}
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs, zfs_, commit_timeout_pct, INT, ZMOD_RW,
"ZIL block open timeout percentage");
ZFS_MODULE_PARAM(zfs_zil, zil_, replay_disable, INT, ZMOD_RW,
"Disable intent logging replay");
ZFS_MODULE_PARAM(zfs_zil, zil_, nocacheflush, INT, ZMOD_RW,
"Disable ZIL cache flushes");
ZFS_MODULE_PARAM(zfs_zil, zil_, slog_bulk, ULONG, ZMOD_RW,
"Limit in bytes slog sync writes per commit");
ZFS_MODULE_PARAM(zfs_zil, zil_, maxblocksize, INT, ZMOD_RW,
"Limit in bytes of ZIL log block size");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/zio.c b/sys/contrib/openzfs/module/zfs/zio.c
index 76ed4fad4304..c016fa323b41 100644
--- a/sys/contrib/openzfs/module/zfs/zio.c
+++ b/sys/contrib/openzfs/module/zfs/zio.c
@@ -1,5042 +1,5054 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2020 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2017, Intel Corporation.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
* Copyright (c) 2021, Datto, Inc.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_trim.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zfs.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
#include <cityhash.h>
/*
* ==========================================================================
* I/O type descriptions
* ==========================================================================
*/
const char *zio_type_name[ZIO_TYPES] = {
/*
* Note: Linux kernel thread name length is limited
* so these names will differ from upstream open zfs.
*/
"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl", "z_trim"
};
int zio_dva_throttle_enabled = B_TRUE;
int zio_deadman_log_all = B_FALSE;
/*
* ==========================================================================
* I/O kmem caches
* ==========================================================================
*/
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif
/* Mark IOs as "slow" if they take longer than 30 seconds */
int zio_slow_io_ms = (30 * MILLISEC);
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
/*
* The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Care should be taken when changing these values as they directly impact
* spa_sync() performance. Tuning these values may introduce subtle performance
* pathologies and should only be done in the context of performance analysis.
* These tunables will eventually be removed and replaced with #defines once
* enough analysis has been done to determine optimal values.
*
* The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
* regular blocks are not deferred.
*
* Starting in sync pass 8 (zfs_sync_pass_dont_compress), we disable
* compression (including of metadata). In practice, we don't have this
* many sync passes, so this has no effect.
*
* The original intent was that disabling compression would help the sync
* passes to converge. However, in practice disabling compression increases
* the average number of sync passes, because when we turn compression off, a
* lot of blocks' sizes will change and thus we have to re-allocate (not
* overwrite) them. It also increases the number of 128KB allocations (e.g.
* for indirect blocks and spacemaps) because these will not be compressed.
* The 128K allocations are especially detrimental to performance on highly
* fragmented systems, which may have very few free segments of this size,
* and may need to load new metaslabs to satisfy 128K allocations.
*/
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 8; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
/*
* Enable smaller cores by excluding metadata
* allocations as well.
*/
int zio_exclude_metadata = 0;
int zio_requeue_io_start_cut_in_line = 1;
#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif
static inline void __zio_execute(zio_t *zio);
static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
void
zio_init(void)
{
size_t c;
zio_cache = kmem_cache_create("zio_cache",
sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
/*
* For small buffers, we want a cache for each multiple of
* SPA_MINBLOCKSIZE. For larger buffers, we want a cache
* for each quarter-power of 2.
*/
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
size_t p2 = size;
size_t align = 0;
size_t data_cflags, cflags;
data_cflags = KMC_NODEBUG;
cflags = (zio_exclude_metadata || size > zio_buf_debug_limit) ?
KMC_NODEBUG : 0;
#if defined(_ILP32) && defined(_KERNEL)
/*
* Cache size limited to 1M on 32-bit platforms until ARC
* buffers no longer require virtual address space.
*/
if (size > zfs_max_recordsize)
break;
#endif
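/*
 * Clear the low-order bits of "size" until only its highest set bit
 * remains; p2 then holds the largest power of two that is less than
 * or equal to size, which is used below when picking an alignment.
 */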
while (!ISP2(p2))
p2 &= p2 - 1;
#ifndef _KERNEL
/*
* If we are using watchpoints, put each buffer on its own page,
* to eliminate the performance overhead of trapping to the
* kernel when modifying a non-watched buffer that shares the
* page with a watched buffer.
*/
if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
continue;
/*
* Here's the problem - on 4K native devices in userland on
* Linux using O_DIRECT, buffers must be 4K aligned or I/O
* will fail with EINVAL, causing zdb (and others) to coredump.
* Since userland probably doesn't need optimized buffer caches,
* we just force 4K alignment on everything.
*/
align = 8 * SPA_MINBLOCKSIZE;
#else
if (size < PAGESIZE) {
align = SPA_MINBLOCKSIZE;
} else if (IS_P2ALIGNED(size, p2 >> 2)) {
align = PAGESIZE;
}
#endif
if (align != 0) {
char name[36];
if (cflags == data_cflags) {
/*
* Resulting kmem caches would be identical.
* Save memory by creating only one.
*/
(void) snprintf(name, sizeof (name),
"zio_buf_comb_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name,
size, align, NULL, NULL, NULL, NULL, NULL,
cflags);
zio_data_buf_cache[c] = zio_buf_cache[c];
continue;
}
(void) snprintf(name, sizeof (name), "zio_buf_%lu",
(ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, cflags);
(void) snprintf(name, sizeof (name), "zio_data_buf_%lu",
(ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, data_cflags);
}
}
while (--c != 0) {
ASSERT(zio_buf_cache[c] != NULL);
if (zio_buf_cache[c - 1] == NULL)
zio_buf_cache[c - 1] = zio_buf_cache[c];
ASSERT(zio_data_buf_cache[c] != NULL);
if (zio_data_buf_cache[c - 1] == NULL)
zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
}
zio_inject_init();
lz4_init();
}
void
zio_fini(void)
{
size_t n = SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT;
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
for (size_t i = 0; i < n; i++) {
if (zio_buf_cache_allocs[i] != zio_buf_cache_frees[i])
(void) printf("zio_fini: [%d] %llu != %llu\n",
(int)((i + 1) << SPA_MINBLOCKSHIFT),
(long long unsigned)zio_buf_cache_allocs[i],
(long long unsigned)zio_buf_cache_frees[i]);
}
#endif
/*
* The same kmem cache can show up multiple times in both zio_buf_cache
* and zio_data_buf_cache. Do a wasteful but trivially correct scan to
* sort it out.
*/
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_buf_cache[j])
zio_buf_cache[j] = NULL;
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
kmem_cache_t *cache = zio_data_buf_cache[i];
if (cache == NULL)
continue;
for (size_t j = i; j < n; j++) {
if (cache == zio_data_buf_cache[j])
zio_data_buf_cache[j] = NULL;
}
kmem_cache_destroy(cache);
}
for (size_t i = 0; i < n; i++) {
VERIFY3P(zio_buf_cache[i], ==, NULL);
VERIFY3P(zio_data_buf_cache[i], ==, NULL);
}
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
lz4_fini();
}
/*
* ==========================================================================
* Allocate and free I/O buffers
* ==========================================================================
*/
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
* useful to inspect ZFS metadata, but if possible, we should avoid keeping
* excess / transient data in-core during a crashdump.
*/
void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
/*
* Use zio_data_buf_alloc to allocate data. The data will not appear in a
* crashdump if the kernel panics. This exists so that we will limit the amount
* of ZFS data that shows up in a kernel crashdump. (Thus reducing the amount
* of kernel heap dumped to disk when the kernel panics)
*/
void *
zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}
void
zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
kmem_cache_free(zio_buf_cache[c], buf);
}
void
zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
kmem_cache_free(zio_data_buf_cache[c], buf);
}
static void
zio_abd_free(void *abd, size_t size)
{
abd_free((abd_t *)abd);
}
/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
*/
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
zio_transform_func_t *transform)
{
zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
zt->zt_orig_abd = zio->io_abd;
zt->zt_orig_size = zio->io_size;
zt->zt_bufsize = bufsize;
zt->zt_transform = transform;
zt->zt_next = zio->io_transform_stack;
zio->io_transform_stack = zt;
zio->io_abd = data;
zio->io_size = size;
}
void
zio_pop_transforms(zio_t *zio)
{
zio_transform_t *zt;
while ((zt = zio->io_transform_stack) != NULL) {
if (zt->zt_transform != NULL)
zt->zt_transform(zio,
zt->zt_orig_abd, zt->zt_orig_size);
if (zt->zt_bufsize != 0)
abd_free(zio->io_abd);
zio->io_abd = zt->zt_orig_abd;
zio->io_size = zt->zt_orig_size;
zio->io_transform_stack = zt->zt_next;
kmem_free(zt, sizeof (zio_transform_t));
}
}
/*
* ==========================================================================
* I/O transform callbacks for subblocks, decompression, and decryption
* ==========================================================================
*/
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
ASSERT(zio->io_size > size);
if (zio->io_type == ZIO_TYPE_READ)
abd_copy(data, zio->io_abd, size);
}
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
if (zio->io_error == 0) {
void *tmp = abd_borrow_buf(data, size);
int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_abd, tmp, zio->io_size, size,
&zio->io_prop.zp_complevel);
abd_return_buf_copy(data, tmp, size);
if (zio_injection_enabled && ret == 0)
ret = zio_handle_fault_injection(zio, EINVAL);
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
}
static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
int ret;
void *tmp;
blkptr_t *bp = zio->io_bp;
spa_t *spa = zio->io_spa;
uint64_t dsobj = zio->io_bookmark.zb_objset;
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t ot = BP_GET_TYPE(bp);
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(size, !=, 0);
if (zio->io_error != 0)
return;
/*
* Verify the cksum of MACs stored in an indirect bp. It will always
* be possible to verify this since it does not require an encryption
* key.
*/
if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
zio_crypt_decode_mac_bp(bp, mac);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/*
* We haven't decompressed the data yet, but
* zio_crypt_do_indirect_mac_checksum() requires
* decompressed data to be able to parse out the MACs
* from the indirect block. We decompress it now and
* throw away the result after we are finished.
*/
tmp = zio_buf_alloc(lsize);
ret = zio_decompress_data(BP_GET_COMPRESS(bp),
zio->io_abd, tmp, zio->io_size, lsize,
&zio->io_prop.zp_complevel);
if (ret != 0) {
ret = SET_ERROR(EIO);
goto error;
}
ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
zio_buf_free(tmp, lsize);
} else {
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
}
abd_copy(data, zio->io_abd, size);
if (zio_injection_enabled && ot != DMU_OT_DNODE && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
if (ret != 0)
goto error;
return;
}
/*
* If this is an authenticated block, just check the MAC. It would be
* nice to separate this out into its own flag, but for the moment
* enum zio_flag is out of bits.
*/
if (BP_IS_AUTHENTICATED(bp)) {
if (ot == DMU_OT_OBJSET) {
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
} else {
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
zio->io_abd, size, mac);
if (zio_injection_enabled && ret == 0) {
ret = zio_handle_decrypt_injection(spa,
&zio->io_bookmark, ot, ECKSUM);
}
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
zio_crypt_decode_params_bp(bp, salt, iv);
if (ot == DMU_OT_INTENT_LOG) {
tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmp, mac);
abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, mac);
}
ret = spa_do_crypt_abd(B_FALSE, spa, &zio->io_bookmark, BP_GET_TYPE(bp),
BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp), salt, iv, mac, size, data,
zio->io_abd, &no_crypt);
if (no_crypt)
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
error:
/* assert that the key was found unless this was speculative */
ASSERT(ret != EACCES || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
/*
* If there was a decryption / authentication error return EIO as
* the io_error. If this was not a speculative zio, create an ereport.
*/
if (ret == ECKSUM) {
zio->io_error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
spa_log_error(spa, &zio->io_bookmark);
(void) zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, &zio->io_bookmark, zio, 0);
}
} else {
zio->io_error = ret;
}
}
/*
* ==========================================================================
* I/O parent/child relationships and pipeline interlocks
* ==========================================================================
*/
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
list_t *pl = &cio->io_parent_list;
*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_child == cio);
return ((*zl)->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
list_t *cl = &pio->io_child_list;
ASSERT(MUTEX_HELD(&pio->io_lock));
*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_parent == pio);
return ((*zl)->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
zio_link_t *zl = NULL;
zio_t *pio = zio_walk_parents(cio, &zl);
VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
return (pio);
}
void
zio_add_child(zio_t *pio, zio_t *cio)
{
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zl->zl_parent = pio;
zl->zl_child = cio;
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
list_insert_head(&cio->io_parent_list, zl);
pio->io_child_count++;
cio->io_parent_count++;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
ASSERT(zl->zl_parent == pio);
ASSERT(zl->zl_child == cio);
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
list_remove(&pio->io_child_list, zl);
list_remove(&cio->io_parent_list, zl);
pio->io_child_count--;
cio->io_parent_count--;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
uint64_t *countp = &zio->io_children[c][wait];
if (*countp != 0) {
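/*
 * Back the zio up one pipeline stage, record which child
 * count it is stalled on, and stop here; when the outstanding
 * children complete, zio_notify_parent() re-dispatches the
 * zio and the stalled stage is retried.
 */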
zio->io_stage >>= 1;
ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
zio->io_stall = countp;
waiting = B_TRUE;
break;
}
}
mutex_exit(&zio->io_lock);
return (waiting);
}
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait,
zio_t **next_to_executep)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
mutex_enter(&pio->io_lock);
if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
*errorp = zio_worst_error(*errorp, zio->io_error);
pio->io_reexecute |= zio->io_reexecute;
ASSERT3U(*countp, >, 0);
(*countp)--;
if (*countp == 0 && pio->io_stall == countp) {
zio_taskq_type_t type =
pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
/*
* If we can tell the caller to execute this parent next, do
* so. Otherwise dispatch the parent zio as its own task.
*
* Having the caller execute the parent when possible reduces
* locking on the zio taskq's, reduces context switch
* overhead, and has no recursion penalty. Note that one
* read from disk typically causes at least 3 zio's: a
* zio_null(), the logical zio_read(), and then a physical
* zio. When the physical ZIO completes, we are able to call
* zio_done() on all 3 of these zio's from one invocation of
* zio_execute() by returning the parent back to
* zio_execute(). Since the parent isn't executed until this
* thread returns back to zio_execute(), the caller should do
* so promptly.
*
* In other cases, dispatching the parent prevents
* overflowing the stack when we have deeply nested
* parent-child relationships, as we do with the "mega zio"
* of writes for spa_sync(), and the chain of ZIL blocks.
*/
if (next_to_executep != NULL && *next_to_executep == NULL) {
*next_to_executep = pio;
} else {
zio_taskq_dispatch(pio, type, B_FALSE);
}
} else {
mutex_exit(&pio->io_lock);
}
}
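/*
 * Illustrative sketch only, not part of the original source: the
 * hand-off decision at the end of zio_notify_parent(), isolated as a
 * hypothetical helper.  When the caller supplies an empty
 * next-to-execute slot, the parent is handed back and run on the
 * caller's thread; otherwise it is dispatched to a taskq as its own
 * unit of work.
 */
static inline void
zio_example_handoff_parent(zio_t *pio, zio_taskq_type_t type,
    zio_t **next_to_executep)
{
	if (next_to_executep != NULL && *next_to_executep == NULL)
		*next_to_executep = pio;
	else
		zio_taskq_dispatch(pio, type, B_FALSE);
}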
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
if (zio->io_child_error[c] != 0 && zio->io_error == 0)
zio->io_error = zio->io_child_error[c];
}
int
zio_bookmark_compare(const void *x1, const void *x2)
{
const zio_t *z1 = x1;
const zio_t *z2 = x2;
if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
return (-1);
if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
return (1);
if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
return (-1);
if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
return (1);
if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
return (-1);
if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
return (1);
if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
return (-1);
if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
return (1);
if (z1 < z2)
return (-1);
if (z1 > z2)
return (1);
return (0);
}
/*
* ==========================================================================
* Create the various types of I/O (read, write, free, etc)
* ==========================================================================
*/
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
void *private, zio_type_t type, zio_priority_t priority,
enum zio_flag flags, vdev_t *vd, uint64_t offset,
const zbookmark_phys_t *zb, enum zio_stage stage,
enum zio_stage pipeline)
{
zio_t *zio;
IMPLY(type != ZIO_TYPE_TRIM, psize <= SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
ASSERT(vd || stage == ZIO_STAGE_OPEN);
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
bzero(zio, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
list_create(&zio->io_parent_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
else if (flags & ZIO_FLAG_GANG_CHILD)
zio->io_child_type = ZIO_CHILD_GANG;
else if (flags & ZIO_FLAG_DDT_CHILD)
zio->io_child_type = ZIO_CHILD_DDT;
else
zio->io_child_type = ZIO_CHILD_LOGICAL;
if (bp != NULL) {
zio->io_bp = (blkptr_t *)bp;
zio->io_bp_copy = *bp;
zio->io_bp_orig = *bp;
if (type != ZIO_TYPE_WRITE ||
zio->io_child_type == ZIO_CHILD_DDT)
zio->io_bp = &zio->io_bp_copy; /* so caller can free */
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
}
zio->io_spa = spa;
zio->io_txg = txg;
zio->io_done = done;
zio->io_private = private;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
zio->io_offset = offset;
zio->io_orig_abd = zio->io_abd = data;
zio->io_orig_size = zio->io_size = psize;
zio->io_lsize = lsize;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
zio->io_pipeline_trace = ZIO_STAGE_OPEN;
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
if (zb != NULL)
zio->io_bookmark = *zb;
if (pio != NULL) {
zio->io_metaslab_class = pio->io_metaslab_class;
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
zio->io_gang_leader = pio->io_gang_leader;
zio_add_child(pio, zio);
}
taskq_init_ent(&zio->io_tqent);
return (zio);
}
static void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
void *private, enum zio_flag flags)
{
zio_t *zio;
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
return (zio);
}
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
return (zio_null(NULL, spa, NULL, done, private, flags));
}
static int
zfs_blkptr_verify_log(spa_t *spa, const blkptr_t *bp,
enum blk_verify_flag blk_verify, const char *fmt, ...)
{
va_list adx;
char buf[256];
va_start(adx, fmt);
(void) vsnprintf(buf, sizeof (buf), fmt, adx);
va_end(adx);
switch (blk_verify) {
case BLK_VERIFY_HALT:
dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
zfs_panic_recover("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_LOG:
zfs_dbgmsg("%s: %s", spa_name(spa), buf);
break;
case BLK_VERIFY_ONLY:
break;
}
return (1);
}
/*
* Verify the block pointer fields contain reasonable values. This means
* it only contains known object types, checksum/compression identifiers,
* block sizes within the maximum allowed limits, valid DVAs, etc.
*
* If everything checks out B_TRUE is returned.  The blk_verify
* argument controls the behavior when an invalid field is detected.
*
* Modes for zfs_blkptr_verify:
* 1) BLK_VERIFY_ONLY (evaluate the block)
* 2) BLK_VERIFY_LOG (evaluate the block and log problems)
* 3) BLK_VERIFY_HALT (call zfs_panic_recover on error)
*/
boolean_t
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp, boolean_t config_held,
enum blk_verify_flag blk_verify)
{
int errors = 0;
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (BPE_GET_ETYPE(bp) >= NUM_BP_EMBEDDED_TYPES) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
}
/*
* Do not verify individual DVAs if the config is not trusted. This
* will be done once the zio is executed in vdev_mirror_map_alloc.
*/
if (!spa->spa_trust_config)
- return (B_TRUE);
+ return (errors == 0);
if (!config_held)
spa_config_enter(spa, SCL_VDEV, bp, RW_READER);
else
ASSERT(spa_config_held(spa, SCL_VDEV, RW_WRITER));
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the blk_birth and
* BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze()
* allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
const dva_t *dva = &bp->blk_dva[i];
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has invalid VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_hole_ops) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has hole VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize) {
errors += zfs_blkptr_verify_log(spa, bp, blk_verify,
"blkptr at %p DVA %u has invalid OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
if (errors > 0)
dprintf_bp(bp, "blkptr at %p dprintf_bp():", bp);
if (!config_held)
spa_config_exit(spa, SCL_VDEV, bp);
return (errors == 0);
}
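/*
 * Illustrative sketch only, not part of the original source: a
 * hypothetical log-only sanity check built on zfs_blkptr_verify().
 * BLK_VERIFY_LOG reports any problems via zfs_dbgmsg() without
 * panicking, and the return value tells the caller whether every
 * field checked out.
 */
static inline boolean_t
zio_example_bp_sane(spa_t *spa, const blkptr_t *bp)
{
	/* B_FALSE: the SCL_VDEV config lock is not already held here. */
	return (zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_LOG));
}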
boolean_t
zfs_dva_valid(spa_t *spa, const dva_t *dva, const blkptr_t *bp)
{
uint64_t vdevid = DVA_GET_VDEV(dva);
if (vdevid >= spa->spa_root_vdev->vdev_children)
return (B_FALSE);
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL)
return (B_FALSE);
if (vd->vdev_ops == &vdev_hole_ops)
return (B_FALSE);
if (vd->vdev_ops == &vdev_missing_ops) {
return (B_FALSE);
}
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t asize = DVA_GET_ASIZE(dva);
if (DVA_GET_GANG(dva))
asize = vdev_gang_header_asize(vd);
if (offset + asize > vd->vdev_asize)
return (B_FALSE);
return (B_TRUE);
}
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
data, size, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
return (zio);
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *private, zio_priority_t priority, enum zio_flag flags,
const zbookmark_phys_t *zb)
{
zio_t *zio;
ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
zp->zp_compress >= ZIO_COMPRESS_OFF &&
zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
DMU_OT_IS_VALID(zp->zp_type) &&
zp->zp_level < 32 &&
zp->zp_copies > 0 &&
zp->zp_copies <= spa_max_replication(spa));
zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
zio->io_ready = ready;
zio->io_children_ready = children_ready;
zio->io_physdone = physdone;
zio->io_prop = *zp;
/*
* Data can be NULL if we are going to call zio_write_override() to
* provide the already-allocated BP. But we may need the data to
* verify a dedup hit (if requested). In this case, don't try to
* dedup (just take the already-allocated BP verbatim). Encrypted
* dedup blocks need data as well so we also disable dedup in this
* case.
*/
if (data == NULL &&
(zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
}
return (zio);
}
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
return (zio);
}
void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
/*
* We must reset the io_prop to match the values that existed
* when the bp was first written by dmu_sync() keeping in mind
* that nopwrite and dedup are mutually exclusive.
*/
zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
zio->io_prop.zp_nopwrite = nopwrite;
zio->io_prop.zp_copies = copies;
zio->io_bp_override = bp;
}
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
(void) zfs_blkptr_verify(spa, bp, B_FALSE, BLK_VERIFY_HALT);
/*
* The check for EMBEDDED is a performance optimization. We
* process the free here (by ignoring it) rather than
* putting it on the list and then processing it in zio_free_sync().
*/
if (BP_IS_EMBEDDED(bp))
return;
metaslab_check_free(spa, bp);
/*
* Frees that are for the currently-syncing txg, are not going to be
* deferred, and which will not need to do a read (i.e. not GANG or
* DEDUP), can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
*
* Note that we only defer frees after zfs_sync_pass_deferred_free
* when the log space map feature is disabled. [see relevant comment
* in spa_sync_iterate_to_convergence()]
*/
if (BP_IS_GANG(bp) ||
BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
(spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
!spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP))) {
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY3P(zio_free_sync(NULL, spa, txg, bp, 0), ==, NULL);
}
}
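/*
 * Illustrative sketch only, not part of the original source: the
 * defer-vs-immediate decision from zio_free() above, restated as a
 * hypothetical predicate.  A free can be processed right away only
 * when it needs no read (not gang, not dedup), targets the
 * currently-syncing txg, and is not past the deferred-free sync pass
 * with the log space map feature inactive.
 */
static inline boolean_t
zio_example_free_can_run_now(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
	return (!BP_IS_GANG(bp) && !BP_GET_DEDUP(bp) &&
	    txg == spa->spa_syncing_txg &&
	    !(spa_sync_pass(spa) >= zfs_sync_pass_deferred_free &&
	    !spa_feature_is_active(spa, SPA_FEATURE_LOG_SPACEMAP)));
}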
/*
* To improve performance, this function may return NULL if we were able
* to do the free immediately. This avoids the cost of creating a zio
* (and linking it to the parent, etc).
*/
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
enum zio_flag flags)
{
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
if (BP_IS_EMBEDDED(bp))
return (NULL);
metaslab_check_free(spa, bp);
arc_freed(spa, bp);
dsl_scan_freed(spa, bp);
if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp)) {
/*
* GANG and DEDUP blocks can induce a read (for the gang block
* header, or the DDT), so issue them asynchronously so that
* this thread is not tied up.
*/
enum zio_stage stage =
ZIO_FREE_PIPELINE | ZIO_STAGE_ISSUE_ASYNC;
return (zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), NULL, NULL,
ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage));
} else {
metaslab_free(spa, bp, txg, B_FALSE);
return (NULL);
}
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_done_func_t *done, void *private, enum zio_flag flags)
{
zio_t *zio;
(void) zfs_blkptr_verify(spa, bp, flags & ZIO_FLAG_CONFIG_WRITER,
BLK_VERIFY_HALT);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
/*
* A claim is an allocation of a specific block. Claims are needed
* to support immediate writes in the intent log. The issue is that
* immediate writes contain committed data, but in a txg that was
* *not* committed. Upon opening the pool after an unclean shutdown,
* the intent log claims all blocks that contain immediate write data
* so that the SPA knows they're in use.
*
* All claims *must* be resolved in the first txg -- before the SPA
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <,
spa_min_claim_txg(spa));
ASSERT(txg == spa_min_claim_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(8) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
ASSERT0(zio->io_queued_timestamp);
return (zio);
}
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *private, enum zio_flag flags)
{
zio_t *zio;
int c;
if (vd->vdev_children == 0) {
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
zio->io_cmd = cmd;
} else {
zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
for (c = 0; c < vd->vdev_children; c++)
zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
done, private, flags));
}
return (zio);
}
zio_t *
zio_trim(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
zio_done_func_t *done, void *private, zio_priority_t priority,
enum zio_flag flags, enum trim_flag trim_flags)
{
zio_t *zio;
ASSERT0(vd->vdev_children);
ASSERT0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
ASSERT0(P2PHASE(size, 1ULL << vd->vdev_ashift));
ASSERT3U(size, !=, 0);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, NULL, size, size, done,
private, ZIO_TYPE_TRIM, priority, flags | ZIO_FLAG_PHYSICAL,
vd, offset, NULL, ZIO_STAGE_OPEN, ZIO_TRIM_PIPELINE);
zio->io_trim_flags = trim_flags;
return (zio);
}
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
/*
* zec checksums are necessarily destructive -- they modify
* the end of the write buffer to hold the verifier/checksum.
* Therefore, we must make a local copy in case the data is
* being written to multiple places in parallel.
*/
abd_t *wbuf = abd_alloc_sametype(data, size);
abd_copy(wbuf, data, size);
zio_push_transform(zio, wbuf, size, size, NULL);
}
return (zio);
}
/*
* Create a child I/O to do some work for us.
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
abd_t *data, uint64_t size, int type, zio_priority_t priority,
enum zio_flag flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
/*
* vdev child I/Os do not propagate their error to the parent.
* Therefore, for correct operation the caller *must* check for
* and handle the error in the child i/o's done callback.
* The only exceptions are i/os that we don't care about
* (OPTIONAL or REPAIR).
*/
ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
done != NULL);
if (type == ZIO_TYPE_READ && bp != NULL) {
/*
* If we have the bp, then the child should perform the
* checksum and the parent need not. This pushes error
* detection as close to the leaves as possible and
* eliminates redundant checksums in the interior nodes.
*/
pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
if (vd->vdev_ops->vdev_op_leaf) {
ASSERT0(vd->vdev_children);
offset += VDEV_LABEL_START_SIZE;
}
flags |= ZIO_VDEV_CHILD_FLAGS(pio);
/*
* If we've decided to do a repair, the write is not speculative --
* even if the original read was.
*/
if (flags & ZIO_FLAG_IO_REPAIR)
flags &= ~ZIO_FLAG_SPECULATIVE;
/*
* If we're creating a child I/O that is not associated with a
* top-level vdev, then the child zio is not an allocating I/O.
* If this is a retried I/O then we ignore it since we will
* have already processed the original allocating I/O.
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
ASSERT(pio->io_metaslab_class != NULL);
ASSERT(pio->io_metaslab_class->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
pio->io_child_type == ZIO_CHILD_GANG);
flags &= ~ZIO_FLAG_IO_ALLOCATING;
}
zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
zio->io_physdone = pio->io_physdone;
if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
zio->io_logical->io_phys_children++;
return (zio);
}
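/*
 * Illustrative sketch only, not part of the original source: because
 * vdev child errors are not propagated automatically, a hypothetical
 * done callback must inspect zio->io_error itself and push any failure
 * up explicitly (here into a parent passed as the private argument,
 * under the parent's lock).
 */
static void
zio_example_child_done(zio_t *zio)
{
	zio_t *pio = zio->io_private;	/* hypothetical: parent as private */

	if (zio->io_error != 0) {
		mutex_enter(&pio->io_lock);
		pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
		mutex_exit(&pio->io_lock);
	}
}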
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
zio_type_t type, zio_priority_t priority, enum zio_flag flags,
zio_done_func_t *done, void *private)
{
zio_t *zio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
data, size, size, done, private, type, priority,
flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
vd, offset, NULL,
ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
return (zio);
}
void
zio_flush(zio_t *zio, vdev_t *vd)
{
zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
void
zio_shrink(zio_t *zio, uint64_t size)
{
ASSERT3P(zio->io_executor, ==, NULL);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
/*
* We don't shrink for raidz because of problems with the
* reconstruction when reading back less than the block size.
* Note, BP_IS_RAIDZ() assumes no compression.
*/
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
if (!BP_IS_RAIDZ(zio->io_bp)) {
/* we are not doing a raw write */
ASSERT3U(zio->io_size, ==, zio->io_lsize);
zio->io_orig_size = zio->io_size = zio->io_lsize = size;
}
}
/*
* ==========================================================================
* Prepare to read and write logical blocks
* ==========================================================================
*/
static zio_t *
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
uint64_t psize =
BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
zio->io_child_type == ZIO_CHILD_LOGICAL &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decompress);
}
if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
zio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decrypt);
}
if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
int psize = BPE_GET_PSIZE(bp);
void *data = abd_borrow_buf(zio->io_abd, psize);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
decode_embedded_bp_compressed(bp, data);
abd_return_buf_copy(zio->io_abd, data, psize);
} else {
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
}
if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
zio->io_flags |= ZIO_FLAG_DONT_CACHE;
if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
zio->io_flags |= ZIO_FLAG_DONT_CACHE;
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
return (zio);
}
static zio_t *
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
return (zio);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
if (zio->io_bp_override) {
blkptr_t *bp = zio->io_bp;
zio_prop_t *zp = &zio->io_prop;
ASSERT(bp->blk_birth != zio->io_txg);
ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);
*bp = *zio->io_bp_override;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (BP_IS_EMBEDDED(bp))
return (zio);
/*
* If we've been overridden and nopwrite is set then
* set the flag accordingly to indicate that a nopwrite
* has already occurred.
*/
if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (zio);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
return (zio);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
return (zio);
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
}
return (zio);
}
static zio_t *
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_prop_t *zp = &zio->io_prop;
enum zio_compress compress = zp->zp_compress;
blkptr_t *bp = zio->io_bp;
uint64_t lsize = zio->io_lsize;
uint64_t psize = zio->io_size;
int pass = 1;
/*
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (NULL);
}
if (!IO_IS_ALLOCATING(zio))
return (zio);
if (zio->io_children_ready != NULL) {
/*
* Now that all our children are ready, run the callback
* associated with this zio in case it wants to modify the
* data to be written.
*/
ASSERT3U(zp->zp_level, >, 0);
zio->io_children_ready(zio);
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
/*
* We're rewriting an existing block, which means we're
* working on behalf of spa_sync(). For spa_sync() to
* converge, it must eventually be the case that we don't
* have to allocate new blocks. But compression changes
* the blocksize, which forces a reallocate, and makes
* convergence take longer. Therefore, after the first
* few passes, stop compressing to ensure convergence.
*/
pass = spa_sync_pass(spa);
ASSERT(zio->io_txg == spa_syncing_txg(spa));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
spa_max_replication(spa)) == BP_GET_NDVAS(bp));
}
/* If it's a compressed write that is not raw, compress the buffer. */
if (compress != ZIO_COMPRESS_OFF &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
void *cbuf = zio_buf_alloc(lsize);
psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize,
zp->zp_complevel);
if (psize == 0 || psize >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
} else if (!zp->zp_dedup && !zp->zp_encrypt &&
psize <= BPE_PAYLOAD_SIZE &&
zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
encode_embedded_bp_compressed(bp,
cbuf, compress, lsize, psize);
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
BP_SET_TYPE(bp, zio->io_prop.zp_type);
BP_SET_LEVEL(bp, zio->io_prop.zp_level);
zio_buf_free(cbuf, lsize);
bp->blk_birth = zio->io_txg;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
return (zio);
} else {
/*
* Round compressed size up to the minimum allocation
* size of the smallest-ashift device, and zero the
* tail. This ensures that the compressed size of the
* BP (and thus compressratio property) are correct,
* in that we charge for the padding used to fill out
* the last sector.
*/
ASSERT3U(spa->spa_min_alloc, >=, SPA_MINBLOCKSHIFT);
size_t rounded = (size_t)roundup(psize,
spa->spa_min_alloc);
if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
psize = lsize;
} else {
abd_t *cdata = abd_get_from_buf(cbuf, lsize);
abd_take_ownership_of_buf(cdata, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, lsize, NULL);
}
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
zp->zp_type == DMU_OT_DNODE) {
/*
* The DMU actually relies on the zio layer's compression
* to free metadnode blocks that have had all contained
* dnodes freed. As a result, even when doing a raw
* receive, we must check whether the block can be compressed
* to a hole.
*/
psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
zio->io_abd, NULL, lsize, zp->zp_complevel);
if (psize == 0 || psize >= lsize)
compress = ZIO_COMPRESS_OFF;
+ } else if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
+ size_t rounded = MIN((size_t)roundup(psize,
+ spa->spa_min_alloc), lsize);
+
+ if (rounded != psize) {
+ abd_t *cdata = abd_alloc_linear(rounded, B_TRUE);
+ abd_zero_off(cdata, psize, rounded - psize);
+ abd_copy_off(cdata, zio->io_abd, 0, 0, psize);
+ psize = rounded;
+ zio_push_transform(zio, cdata,
+ psize, rounded, NULL);
+ }
} else {
ASSERT3U(psize, !=, 0);
}
/*
* The final pass of spa_sync() must be all rewrites, but the first
* few passes offer a trade-off: allocating blocks defers convergence,
* but newly allocated blocks are sequential, so they can be written
* to disk faster. Therefore, we allow the first few passes of
* spa_sync() to allocate new blocks, but force rewrites after that.
* There should only be a handful of blocks after pass 1 in any case.
*/
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
VERIFY3U(psize, !=, 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
BP_ZERO(bp);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
}
if (psize == 0) {
if (zio->io_bp_orig.blk_birth != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_BIRTH(bp, zio->io_txg, 0);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
} else {
ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, compress);
BP_SET_CHECKSUM(bp, zp->zp_checksum);
BP_SET_DEDUP(bp, zp->zp_dedup);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
if (zp->zp_dedup) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!zp->zp_encrypt ||
DMU_OT_IS_ENCRYPTED(zp->zp_type));
zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
}
if (zp->zp_nopwrite) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
return (zio);
}
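/*
 * Illustrative sketch only, not part of the original source: the
 * "is compression worth keeping" test from zio_write_compress(),
 * restated as a hypothetical helper.  The compressed size is charged
 * at the pool's minimum allocation size, so e.g. 5000 compressed bytes
 * on a pool with spa_min_alloc of 4096 round up to 8192; compression
 * is kept only if the rounded size is still smaller than lsize.
 */
static inline boolean_t
zio_example_compression_worthwhile(spa_t *spa, uint64_t psize, uint64_t lsize)
{
	size_t rounded = (size_t)roundup(psize, spa->spa_min_alloc);

	return (psize != 0 && psize < lsize && rounded < lsize);
}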
static zio_t *
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
if (BP_GET_DEDUP(bp))
zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
}
ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
return (zio);
}
/*
* ==========================================================================
* Execute the I/O pipeline
* ==========================================================================
*/
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
int flags = (cutinline ? TQ_FRONT : 0);
/*
* If we're a config writer or a probe, the normal issue and
* interrupt threads may all be blocked waiting for the config lock.
* In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
*/
if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
t = ZIO_TYPE_NULL;
/*
* A similar issue exists for the L2ARC write thread until L2ARC 2.0.
*/
if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
t = ZIO_TYPE_NULL;
/*
* If this is a high priority I/O, then use the high priority taskq if
* available.
*/
if ((zio->io_priority == ZIO_PRIORITY_NOW ||
zio->io_priority == ZIO_PRIORITY_SYNC_WRITE) &&
spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
/*
* NB: We are assuming that the zio can only be dispatched
* to a single taskq at a time. It would be a grievous error
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(spa, t, q, zio_execute, zio, flags,
&zio->io_tqent);
}
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
spa_t *spa = zio->io_spa;
taskq_t *tq = taskq_of_curthread();
for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i;
for (i = 0; i < tqs->stqs_count; i++) {
if (tqs->stqs_taskq[i] == tq)
return (B_TRUE);
}
}
return (B_FALSE);
}
static zio_t *
zio_issue_async(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
void
zio_interrupt(void *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
void
zio_delay_interrupt(zio_t *zio)
{
/*
* The timeout_generic() function isn't defined in userspace, so
* rather than trying to implement the function, the zio delay
* functionality has been disabled for userspace builds.
*/
#ifdef _KERNEL
/*
* If io_target_timestamp is zero, then no delay has been registered
* for this IO; skip the delay and issue it directly to the zio
* layer.
*/
if (zio->io_target_timestamp != 0) {
hrtime_t now = gethrtime();
if (now >= zio->io_target_timestamp) {
/*
* This IO has already taken longer than the target
* delay to complete, so we don't want to delay it
* any longer; we "miss" the delay and issue it
* directly to the zio layer. This is likely due to
* the target latency being set to a value less than
* the underlying hardware can satisfy (e.g. delay
* set to 1ms, but the disks take 10ms to complete an
* IO request).
*/
DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
hrtime_t, now);
zio_interrupt(zio);
} else {
taskqid_t tid;
hrtime_t diff = zio->io_target_timestamp - now;
clock_t expire_at_tick = ddi_get_lbolt() +
NSEC_TO_TICK(diff);
DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
hrtime_t, now, hrtime_t, diff);
if (NSEC_TO_TICK(diff) == 0) {
/* Our delay is less than a jiffy - just spin */
zfs_sleep_until(zio->io_target_timestamp);
zio_interrupt(zio);
} else {
/*
* Use taskq_dispatch_delay() in the place of
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
zio_interrupt, zio, TQ_NOSLEEP,
expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
* Couldn't allocate a task. Just
* finish the zio without a delay.
*/
zio_interrupt(zio);
}
}
}
return;
}
#endif
DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
zio_interrupt(zio);
}
static void
zio_deadman_impl(zio_t *pio, int ziodepth)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
vdev_t *vd = pio->io_vd;
if (zio_deadman_log_all || (vd != NULL && vd->vdev_ops->vdev_op_leaf)) {
vdev_queue_t *vq = vd ? &vd->vdev_queue : NULL;
zbookmark_phys_t *zb = &pio->io_bookmark;
uint64_t delta = gethrtime() - pio->io_timestamp;
uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
zfs_dbgmsg("slow zio[%d]: zio=%px timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
"path=%s "
"last=%llu type=%d "
"priority=%d flags=0x%x stage=0x%x "
"pipeline=0x%x pipeline-trace=0x%x "
"objset=%llu object=%llu "
"level=%llu blkid=%llu "
"offset=%llu size=%llu "
"error=%d",
ziodepth, pio, pio->io_timestamp,
(u_longlong_t)delta, pio->io_delta, pio->io_delay,
vd ? vd->vdev_path : "NULL",
vq ? vq->vq_io_complete_ts : 0, pio->io_type,
pio->io_priority, pio->io_flags, pio->io_stage,
pio->io_pipeline, pio->io_pipeline_trace,
(u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid,
(u_longlong_t)pio->io_offset, (u_longlong_t)pio->io_size,
pio->io_error);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0);
if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
taskq_empty_ent(&pio->io_tqent)) {
zio_interrupt(pio);
}
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_deadman_impl(cio, ziodepth + 1);
}
mutex_exit(&pio->io_lock);
}
/*
* Log the critical information describing this zio and all of its children
* using the zfs_dbgmsg() interface, then post a deadman event for the ZED.
*/
void
zio_deadman(zio_t *pio, char *tag)
{
spa_t *spa = pio->io_spa;
char *name = spa_name(spa);
if (!zfs_deadman_enabled || spa_suspended(spa))
return;
zio_deadman_impl(pio, 0);
switch (spa_get_deadman_failmode(spa)) {
case ZIO_FAILURE_MODE_WAIT:
zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_CONTINUE:
zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_PANIC:
fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
break;
}
}
/*
* Execute the I/O pipeline until one of the following occurs:
* (1) the I/O completes; (2) the pipeline stalls waiting for
* dependent child I/Os; (3) the I/O issues, so we're waiting
* for an I/O completion interrupt; (4) the I/O is delegated by
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
* there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
*/
static zio_pipe_stage_t *zio_pipeline[];
/*
* zio_execute() is a wrapper around the static function
* __zio_execute() so that we can force __zio_execute() to be
* inlined. This reduces stack overhead which is important
* because __zio_execute() is called recursively in several zio
* code paths. zio_execute() itself cannot be inlined because
* it is externally visible.
*/
void
zio_execute(void *zio)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
__zio_execute(zio);
spl_fstrans_unmark(cookie);
}
/*
* Used to determine whether the stack in the current context is large
* enough to allow zio_execute() to be called recursively.  A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
static boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
/* Executing in txg_sync_thread() context. */
if (dp && curthread == dp->dp_tx.tx_sync_thread)
return (B_TRUE);
/* Pool initialization outside of zio_taskq context. */
if (dp && spa_is_initializing(dp->dp_spa) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
}
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
zio->io_executor = curthread;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
ASSERT(zio->io_stall == NULL);
do {
stage <<= 1;
} while ((stage & pipeline) == 0);
ASSERT(stage <= ZIO_STAGE_DONE);
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
* If the current context doesn't have large enough stacks
* the zio must be issued asynchronously to prevent overflow.
*/
if (zio_execute_stack_check(zio)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
/*
* The zio pipeline stage returns the next zio to execute
* (typically the same as this one), or NULL if we should
* stop.
*/
zio = zio_pipeline[highbit64(stage) - 1](zio);
if (zio == NULL)
return;
}
}
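/*
 * Illustrative sketch only, not part of the original source: the
 * stage-advance step from __zio_execute() as a hypothetical helper.
 * Each stage is a single bit, so the next stage to run is the lowest
 * bit of the pipeline mask strictly above the current stage; the loop
 * terminates because ZIO_STAGE_DONE is part of every pipeline.
 */
static inline enum zio_stage
zio_example_next_stage(enum zio_stage stage, enum zio_stage pipeline)
{
	do {
		stage <<= 1;
	} while ((stage & pipeline) == 0);

	return (stage);
}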
/*
* ==========================================================================
* Initiate I/O, either sync or async
* ==========================================================================
*/
int
zio_wait(zio_t *zio)
{
/*
* Some routines, like zio_free_sync(), may return a NULL zio
* to avoid the performance overhead of creating and then destroying
* an unneeded zio. For the callers' simplicity, we accept a NULL
* zio and ignore it.
*/
if (zio == NULL)
return (0);
long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
ASSERT3P(zio->io_executor, ==, NULL);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL) {
error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
ddi_get_lbolt() + timeout);
if (zfs_deadman_enabled && error == -1 &&
gethrtime() - zio->io_queued_timestamp >
spa_deadman_ziotime(zio->io_spa)) {
mutex_exit(&zio->io_lock);
timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
zio_deadman(zio, FTAG);
mutex_enter(&zio->io_lock);
}
}
mutex_exit(&zio->io_lock);
error = zio->io_error;
zio_destroy(zio);
return (error);
}
void
zio_nowait(zio_t *zio)
{
/*
* See comment in zio_wait().
*/
if (zio == NULL)
return;
ASSERT3P(zio->io_executor, ==, NULL);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
zio_unique_parent(zio) == NULL) {
zio_t *pio;
/*
* This is a logical async I/O with no parent to wait for it.
* We add it to the spa_async_root_zio "Godfather" I/O which
* will ensure it completes prior to unloading the pool.
*/
spa_t *spa = zio->io_spa;
pio = spa->spa_async_zio_root[CPU_SEQID_UNSTABLE];
zio_add_child(pio, zio);
}
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
}
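/*
 * Illustrative sketch only, not part of the original source: the common
 * issue pattern built from the primitives above.  A root zio gathers
 * asynchronously issued children, and zio_wait() on it blocks until the
 * children (and the root itself) complete, returning a nonzero error if
 * any child failed.  The helper name and the read parameters are
 * hypothetical.
 */
static int
zio_example_issue_reads(spa_t *spa, const blkptr_t *bps, abd_t **abds,
    int count, const zbookmark_phys_t *zb)
{
	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

	for (int i = 0; i < count; i++) {
		zio_nowait(zio_read(rio, spa, &bps[i], abds[i],
		    BP_GET_PSIZE(&bps[i]), NULL, NULL,
		    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, zb));
	}

	return (zio_wait(rio));
}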
/*
* ==========================================================================
* Reexecute, cancel, or suspend/resume failed I/O
* ==========================================================================
*/
static void
zio_reexecute(void *arg)
{
zio_t *pio = arg;
zio_t *cio, *cio_next;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
ASSERT(pio->io_gang_leader == NULL);
ASSERT(pio->io_gang_tree == NULL);
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0;
pio->io_error = 0;
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_state[w] = 0;
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio))
BP_ZERO(pio->io_bp);
/*
* As we reexecute pio's children, new children could be created.
* New children go to the head of pio's io_child_list, however,
* so we will (correctly) not reexecute them. The key is that
* the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'.
*/
zio_link_t *zl = NULL;
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w]++;
mutex_exit(&pio->io_lock);
zio_reexecute(cio);
mutex_enter(&pio->io_lock);
}
mutex_exit(&pio->io_lock);
/*
* Now that all children have been reexecuted, execute the parent.
* We don't reexecute "The Godfather" I/O here as it's the
* responsibility of the caller to wait on it.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
pio->io_queued_timestamp = gethrtime();
__zio_execute(pio);
}
}
void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
"failure and has been suspended.\n", spa_name(spa));
(void) zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
NULL, NULL, 0);
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspend_zio_root == NULL)
spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio_unique_parent(zio) == NULL);
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
zio_t *pio;
/*
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
mutex_exit(&spa->spa_suspend_lock);
if (pio == NULL)
return (0);
zio_reexecute(pio);
return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
mutex_enter(&spa->spa_suspend_lock);
while (spa_suspended(spa))
cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
mutex_exit(&spa->spa_suspend_lock);
}
/*
* ==========================================================================
* Gang blocks.
*
* A gang block is a collection of small blocks that looks to the DMU
* like one large block. When zio_dva_allocate() cannot find a block
* of the requested size, due to either severe fragmentation or the pool
* being nearly full, it calls zio_write_gang_block() to construct the
* block from smaller fragments.
*
* A gang block consists of a gang header (zio_gbh_phys_t) and up to
* three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
* an indirect block: it's an array of block pointers. It consumes
* only one sector and hence is allocatable regardless of fragmentation.
* The gang header's bps point to its gang members, which hold the data.
*
* Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
* as the verifier to ensure uniqueness of the SHA256 checksum.
* Critically, the gang block bp's blk_cksum is the checksum of the data,
* not the gang header. This ensures that data block signatures (needed for
* deduplication) are independent of how the block is physically stored.
*
* Gang blocks can be nested: a gang member may itself be a gang block.
* Thus every gang block is a tree in which root and all interior nodes are
* gang headers, and the leaves are normal blocks that contain user data.
* The root of the gang tree is called the gang leader.
*
* To perform any operation (read, rewrite, free, claim) on a gang block,
* zio_gang_assemble() first assembles the gang tree (minus data leaves)
* in the io_gang_tree field of the original logical i/o by recursively
* reading the gang leader and all gang headers below it. This yields
* an in-core tree containing the contents of every gang header and the
* bps for every constituent of the gang block.
*
* With the gang tree now assembled, zio_gang_issue() just walks the gang tree
* and invokes a callback on each bp. To free a gang block, zio_gang_issue()
* calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
* zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
* zio_read_gang() is a wrapper around zio_read() that omits reading gang
* headers, since we already have those in io_gang_tree. zio_rewrite_gang()
* performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
* of the gang header plus zio_checksum_compute() of the data to update the
* gang header's blk_cksum as described above.
*
* The two-phase assemble/issue model solves the problem of partial failure --
* what if you'd freed part of a gang block but then couldn't read the
* gang header for another part? Assembling the entire gang tree first
* ensures that all the necessary gang header I/O has succeeded before
* starting the actual work of free, claim, or write. Once the gang tree
* is assembled, free and claim are in-memory operations that cannot fail.
*
* In the event that a gang write fails, zio_dva_unallocate() walks the
* gang tree to immediately free (i.e. insert back into the space map)
* everything we've allocated. This ensures that we don't get ENOSPC
* errors during repeated suspend/resume cycles due to a flaky device.
*
* Gang rewrites only happen during sync-to-convergence. If we can't assemble
* the gang tree, we won't modify the block, so we can safely defer the free
* (knowing that the block is still intact). If we *can* assemble the gang
* tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
* each constituent bp and we can allocate a new block on the next sync pass.
*
* In all cases, the gang tree allows complete recovery from partial failure.
* ==========================================================================
*/
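/*
 * Illustrative sketch only, not part of the original source: walking an
 * assembled gang tree, mirroring the recursion used by
 * zio_gang_tree_issue().  The helper name is hypothetical; it simply
 * counts the leaf (data) bps referenced by a gang header and all nested
 * gang headers beneath it.
 */
static uint64_t
zio_example_gang_tree_leaves(zio_gang_node_t *gn)
{
	uint64_t leaves = 0;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];

		if (BP_IS_HOLE(gbp))
			continue;
		if (BP_IS_GANG(gbp) && gn->gn_child[g] != NULL)
			leaves += zio_example_gang_tree_leaves(gn->gn_child[g]);
		else
			leaves++;
	}

	return (leaves);
}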
static void
zio_gang_issue_func_done(zio_t *zio)
{
abd_free(zio->io_abd);
}
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
if (gn != NULL)
return (pio);
return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
BP_GET_PSIZE(bp), zio_gang_issue_func_done,
NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio;
if (gn != NULL) {
abd_t *gbh_abd =
abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark);
/*
* As we rewrite each gang header, the pipeline will compute
* a new gang block header checksum for it; but no one will
* compute a new data checksum, so we do that here. The one
* exception is the gang leader: the pipeline already computed
* its data checksum because that stage precedes gang assembly.
* (Presently, nothing actually uses interior data checksums;
* this is just good hygiene.)
*/
if (gn != pio->io_gang_leader->io_gang_tree) {
abd_t *buf = abd_get_offset(data, offset);
zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
buf, BP_GET_PSIZE(bp));
abd_free(buf);
}
/*
* If we are here to damage data for testing purposes,
* leave the GBH alone so that we can detect the damage.
*/
if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
} else {
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
abd_get_offset(data, offset), BP_GET_PSIZE(bp),
zio_gang_issue_func_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
}
return (zio);
}
/* ARGSUSED */
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio = zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio));
if (zio == NULL) {
zio = zio_null(pio, pio->io_spa,
NULL, NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio));
}
return (zio);
}
/* ARGSUSED */
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
NULL,
zio_read_gang,
zio_rewrite_gang,
zio_free_gang,
zio_claim_gang,
NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn;
ASSERT(*gnpp == NULL);
gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
*gnpp = gn;
return (gn);
}
static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
kmem_free(gn, sizeof (*gn));
*gnpp = NULL;
}
static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
if (gn == NULL)
return;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
}
static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
ASSERT(gio->io_gang_leader == gio);
ASSERT(BP_IS_GANG(bp));
zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_gang_tree_assemble_done, gn, gio->io_priority,
ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}
static void
zio_gang_tree_assemble_done(zio_t *zio)
{
zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
ASSERT(gio == zio_unique_parent(zio));
ASSERT(zio->io_child_count == 0);
if (zio->io_error)
return;
/* this ABD was created from a linear buf in zio_gang_tree_assemble */
if (BP_SHOULD_BYTESWAP(bp))
byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
abd_free(zio->io_abd);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
}
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
uint64_t offset)
{
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
/*
* If you're a gang header, your data is in gn->gn_gbh.
* If you're a gang member, your data is in 'data' and gn == NULL.
*/
zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
offset);
offset += BP_GET_PSIZE(gbp);
}
}
if (gn == gio->io_gang_tree)
ASSERT3U(gio->io_size, ==, offset);
if (zio != pio)
zio_nowait(zio);
}
static zio_t *
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
return (zio);
}
static zio_t *
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
0);
else
zio_gang_tree_free(&zio->io_gang_tree);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (zio);
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
zio_t *gio __maybe_unused = zio->io_gang_leader;
if (BP_IS_HOLE(zio->io_bp))
return;
ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock);
for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
DVA_SET_ASIZE(&pdva[d], asize);
}
mutex_exit(&pio->io_lock);
}
static void
zio_write_gang_done(zio_t *zio)
{
/*
* The io_abd field will be NULL for a zio with no data. The io_flags
* will initially have the ZIO_FLAG_NODATA bit flag set, but we can't
* check for it here as it is cleared in zio_ready.
*/
if (zio->io_abd != NULL)
abd_free(zio->io_abd);
}
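/*
 * A write whose full-size allocation failed is turned into a gang block
 * here: allocate a SPA_GANGBLOCKSIZE header, then split the remaining
 * data across up to SPA_GBH_NBLKPTRS gang children.
 */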
static zio_t *
zio_write_gang_block(zio_t *pio, metaslab_class_t *mc)
{
spa_t *spa = pio->io_spa;
blkptr_t *bp = pio->io_bp;
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
zio_gang_node_t *gn, **gnpp;
zio_gbh_phys_t *gbh;
abd_t *gbh_abd;
uint64_t txg = pio->io_txg;
uint64_t resid = pio->io_size;
uint64_t lsize;
int copies = gio->io_prop.zp_copies;
int gbh_copies;
zio_prop_t zp;
int error;
boolean_t has_data = !(pio->io_flags & ZIO_FLAG_NODATA);
/*
* encrypted blocks need DVA[2] free so encrypted gang headers can't
* have a third copy.
*/
gbh_copies = MIN(copies + 1, spa_max_replication(spa));
if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP)
gbh_copies = SPA_DVAS_PER_BP - 1;
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
flags |= METASLAB_ASYNC_ALLOC;
VERIFY(zfs_refcount_held(&mc->mc_allocator[pio->io_allocator].
mca_alloc_slots, pio));
/*
* The logical zio has already placed a reservation for
* 'copies' allocation slots but gang blocks may require
* additional copies. These additional copies
* (i.e. gbh_copies - copies) are guaranteed to succeed
* since metaslab_class_throttle_reserve() always allows
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
pio->io_allocator, pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio, pio->io_allocator);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* If we failed to allocate the gang block header then
* we remove any additional allocation reservations that
* we placed here. The original reservation will
* be removed when the logical I/O goes to the ready
* stage.
*/
metaslab_class_throttle_unreserve(mc,
gbh_copies - copies, pio->io_allocator, pio);
}
pio->io_error = error;
return (pio);
}
if (pio == gio) {
gnpp = &gio->io_gang_tree;
} else {
gnpp = pio->io_private;
ASSERT(pio->io_ready == zio_write_gang_member_ready);
}
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
bzero(gbh, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
* Create the gang header.
*/
zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
/*
* Create and nowait the gang children.
*/
for (int g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
zp.zp_complevel = gio->io_prop.zp_complevel;
zp.zp_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
bzero(zp.zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp.zp_iv, ZIO_DATA_IV_LEN);
bzero(zp.zp_mac, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
has_data ? abd_get_offset(pio->io_abd, pio->io_size -
resid) : NULL, lsize, lsize, &zp,
zio_write_gang_member_ready, NULL, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(has_data);
/*
* Gang children won't throttle but we should
* account for their work, so reserve an allocation
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
zp.zp_copies, cio->io_allocator, cio, flags));
}
zio_nowait(cio);
}
/*
* Set pio's pipeline to just wait for zio to finish.
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
/*
* We didn't allocate this bp, so make sure it doesn't get unmarked.
*/
pio->io_flags &= ~ZIO_FLAG_FASTWRITE;
zio_nowait(zio);
return (pio);
}
/*
* The zio_nop_write stage in the pipeline determines if allocating a
* new bp is necessary. The nopwrite feature can handle writes in
* either syncing or open context (i.e. zil writes) and as a result is
* mutually exclusive with dedup.
*
* By leveraging a cryptographically secure checksum, such as SHA256, we
* can compare the checksums of the new data and the old to determine if
* allocating a new block is required. Note that our requirements for
* cryptographic strength are fairly weak: there can't be any accidental
* hash collisions, but we don't need to be secure against intentional
* (malicious) collisions. To trigger a nopwrite, you have to be able
* to write the file to begin with, and triggering an incorrect (hash
* collision) nopwrite is no worse than simply writing to the file.
* That said, there are no known attacks against the checksum algorithms
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
static zio_t *
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_GET_LEVEL(bp) == 0);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
ASSERT(zio->io_bp_override == NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Check to see if the original bp and the new bp have matching
* characteristics (i.e. same checksum, compression algorithms, etc).
* If they don't then just continue with the pipeline which will
* allocate a new bp.
*/
if (BP_IS_HOLE(bp_orig) ||
!(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) ||
BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
return (zio);
/*
* If the checksums match then reset the pipeline so that we
* avoid allocating a new bp and issuing any I/O.
*/
if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
sizeof (uint64_t)) == 0);
/*
* If we're overwriting a block that is currently on an
* indirect vdev, then ignore the nopwrite request and
* allow a new block to be allocated on a concrete vdev.
*/
spa_config_enter(zio->io_spa, SCL_VDEV, FTAG, RW_READER);
vdev_t *tvd = vdev_lookup_top(zio->io_spa,
DVA_GET_VDEV(&bp->blk_dva[0]));
if (tvd->vdev_ops == &vdev_indirect_ops) {
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
return (zio);
}
spa_config_exit(zio->io_spa, SCL_VDEV, FTAG);
*bp = *bp_orig;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
return (zio);
}
/*
* ==========================================================================
* Dedup
* ==========================================================================
*/
static void
zio_ddt_child_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp;
zio_t *pio = zio_unique_parent(zio);
mutex_enter(&pio->io_lock);
ddp = ddt_phys_select(dde, bp);
if (zio->io_error == 0)
ddt_phys_clear(ddp); /* this ddp doesn't need repair */
if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
dde->dde_repair_abd = zio->io_abd;
else
abd_free(zio->io_abd);
mutex_exit(&pio->io_lock);
}
static zio_t *
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = ddt_repair_start(ddt, bp);
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
blkptr_t blk;
ASSERT(zio->io_vsd == NULL);
zio->io_vsd = dde;
if (ddp_self == NULL)
return (zio);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
&blk);
zio_nowait(zio_read(zio, zio->io_spa, &blk,
abd_alloc_for_io(zio->io_size, B_TRUE),
zio->io_size, zio_ddt_child_read_done, dde,
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
return (zio);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
return (zio);
}
static zio_t *
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
return (zio);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (NULL);
}
if (dde->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_repair_abd,
zio->io_size);
zio->io_child_error[ZIO_CHILD_DDT] = 0;
}
ddt_repair_done(ddt, dde);
zio->io_vsd = NULL;
}
ASSERT(zio->io_vsd == NULL);
return (zio);
}
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
spa_t *spa = zio->io_spa;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw));
/*
* Note: we compare the original data, not the transformed data,
* because when zio->io_bp is an override bp, we will not have
* pushed the I/O transforms. That's an important optimization
* because otherwise we'd compress/encrypt all dmu_sync() data twice.
* However, we should never get a raw, override zio so in these
* cases we can compare the io_abd directly. This is useful because
* it allows us to do dedup verification even if we don't have access
* to the original data (for instance, if the encryption keys aren't
* loaded).
*/
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
zio_t *lio = dde->dde_lead_zio[p];
if (lio != NULL && do_raw) {
return (lio->io_size != zio->io_size ||
abd_cmp(zio->io_abd, lio->io_abd) != 0);
} else if (lio != NULL) {
return (lio->io_orig_size != zio->io_orig_size ||
abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
}
}
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p];
if (ddp->ddp_phys_birth != 0 && do_raw) {
blkptr_t blk = *zio->io_bp;
uint64_t psize;
abd_t *tmpabd;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
psize = BP_GET_PSIZE(&blk);
if (psize != zio->io_size)
return (B_TRUE);
ddt_exit(ddt);
tmpabd = abd_alloc_for_io(psize, B_TRUE);
error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_RAW, &zio->io_bookmark));
if (error == 0) {
if (abd_cmp(tmpabd, zio->io_abd) != 0)
error = SET_ERROR(ENOENT);
}
abd_free(tmpabd);
ddt_enter(ddt);
return (error != 0);
} else if (ddp->ddp_phys_birth != 0) {
arc_buf_t *abuf = NULL;
arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
return (B_TRUE);
ddt_exit(ddt);
error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
if (error == 0) {
if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
zio->io_orig_size) != 0)
error = SET_ERROR(ENOENT);
arc_buf_destroy(abuf, &abuf);
}
ddt_enter(ddt);
return (error != 0);
}
}
return (B_FALSE);
}
static void
zio_ddt_child_write_ready(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *pio;
if (zio->io_error)
return;
ddt_enter(ddt);
ASSERT(dde->dde_lead_zio[p] == zio);
ddt_phys_fill(ddp, zio->io_bp);
zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
ddt_exit(ddt);
}
static void
zio_ddt_child_write_done(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_enter(ddt);
ASSERT(ddp->ddp_refcnt == 0);
ASSERT(dde->dde_lead_zio[p] == zio);
dde->dde_lead_zio[p] = NULL;
if (zio->io_error == 0) {
zio_link_t *zl = NULL;
while (zio_walk_parents(zio, &zl) != NULL)
ddt_phys_addref(ddp);
} else {
ddt_phys_clear(ddp);
}
ddt_exit(ddt);
}
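/*
 * Dedup write: look up (or create) the DDT entry for this block.
 * If a physical copy already exists or a leading write is in flight,
 * reference it; otherwise issue a child write and record it as the
 * lead zio for this entry.
 */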
static zio_t *
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t txg = zio->io_txg;
zio_prop_t *zp = &zio->io_prop;
int p = zp->zp_copies;
zio_t *cio = NULL;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_TRUE);
ddp = &dde->dde_phys[p];
if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
/*
* If we're using a weak checksum, upgrade to a strong checksum
* and try again. If we're already using a strong checksum,
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
BP_ZERO(bp);
} else {
zp->zp_dedup = B_FALSE;
BP_SET_DEDUP(bp, B_FALSE);
}
ASSERT(!BP_GET_DEDUP(bp));
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (zio);
}
if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
if (ddp->ddp_phys_birth != 0)
ddt_bp_fill(ddp, bp, txg);
if (dde->dde_lead_zio[p] != NULL)
zio_add_child(zio, dde->dde_lead_zio[p]);
else
ddt_phys_addref(ddp);
} else if (zio->io_bp_override) {
ASSERT(bp->blk_birth == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_fill(ddp, bp);
ddt_phys_addref(ddp);
} else {
cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, zp,
zio_ddt_child_write_ready, NULL, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
dde->dde_lead_zio[p] = cio;
}
ddt_exit(ddt);
zio_nowait(cio);
return (zio);
}
ddt_entry_t *freedde; /* for debugging */
static zio_t *
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
if (dde) {
ddp = ddt_phys_select(dde, bp);
if (ddp)
ddt_phys_decref(ddp);
}
ddt_exit(ddt);
return (zio);
}
/*
* ==========================================================================
* Allocate and free blocks
* ==========================================================================
*/
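/*
 * Pop the next queued zio from this allocator's tree, but only if a
 * throttle reservation can be placed for it; otherwise leave it queued.
 */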
static zio_t *
zio_io_to_allocate(spa_t *spa, int allocator)
{
zio_t *zio;
ASSERT(MUTEX_HELD(&spa->spa_allocs[allocator].spaa_lock));
zio = avl_first(&spa->spa_allocs[allocator].spaa_tree);
if (zio == NULL)
return (NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
ASSERT3U(zio->io_allocator, ==, allocator);
if (!metaslab_class_throttle_reserve(zio->io_metaslab_class,
zio->io_prop.zp_copies, allocator, zio, 0)) {
return (NULL);
}
avl_remove(&spa->spa_allocs[allocator].spaa_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
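/*
 * Allocation throttle stage: writes subject to the throttle are queued
 * on a per-allocator tree and issued as reservations become available;
 * sync writes, gang children, and no-data writes pass straight through.
 */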
static zio_t *
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
metaslab_class_t *mc;
/* locate an appropriate allocation class */
mc = spa_preferred_class(spa, zio->io_size, zio->io_prop.zp_type,
zio->io_prop.zp_level, zio->io_prop.zp_zpl_smallblk);
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
!mc->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
return (zio);
}
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
zbookmark_phys_t *bm = &zio->io_bookmark;
/*
* We want to try to use as many allocators as possible to help improve
* performance, but we also want logically adjacent IOs to be physically
* adjacent to improve sequential read performance. We chunk each object
* into 2^20 block regions, and then hash based on the objset, object,
* level, and region to accomplish both of these goals.
*/
int allocator = (uint_t)cityhash4(bm->zb_objset, bm->zb_object,
bm->zb_level, bm->zb_blkid >> 20) % spa->spa_alloc_count;
zio->io_allocator = allocator;
zio->io_metaslab_class = mc;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
avl_add(&spa->spa_allocs[allocator].spaa_tree, zio);
nio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
return (nio);
}
static void
zio_allocate_dispatch(spa_t *spa, int allocator)
{
zio_t *zio;
mutex_enter(&spa->spa_allocs[allocator].spaa_lock);
zio = zio_io_to_allocate(spa, allocator);
mutex_exit(&spa->spa_allocs[allocator].spaa_lock);
if (zio == NULL)
return;
ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
ASSERT0(zio->io_error);
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
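/*
 * Allocate DVAs for this block in the preferred metaslab class, falling
 * back to the normal class and finally to gang blocks on ENOSPC.
 */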
static zio_t *
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
metaslab_class_t *mc;
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_gang_leader == NULL) {
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
}
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
if (zio->io_flags & ZIO_FLAG_NODATA)
flags |= METASLAB_DONT_THROTTLE;
if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
flags |= METASLAB_GANG_CHILD;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
/*
* if not already chosen, locate an appropriate allocation class
*/
mc = zio->io_metaslab_class;
if (mc == NULL) {
mc = spa_preferred_class(spa, zio->io_size,
zio->io_prop.zp_type, zio->io_prop.zp_level,
zio->io_prop.zp_zpl_smallblk);
zio->io_metaslab_class = mc;
}
/*
* Try allocating the block in the usual metaslab class.
* If that's full, allocate it in the normal class.
* If that's full, allocate as a gang block,
* and if all are full, the allocation fails (which shouldn't happen).
*
* Note that we do not fall back on embedded slog (ZIL) space, to
* preserve unfragmented slog space, which is critical for decent
* sync write performance. If a log allocation fails, we will fall
* back to spa_sync() which is abysmal for performance.
*/
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
/*
* Fallback to normal class when an alloc class is full
*/
if (error == ENOSPC && mc != spa_normal_class(spa)) {
/*
* If throttling, transfer reservation over to normal class.
* The io_allocator slot can remain the same even though we
* are switching classes.
*/
if (mc->mc_alloc_throttle_enabled &&
(zio->io_flags & ZIO_FLAG_IO_ALLOCATING)) {
metaslab_class_throttle_unreserve(mc,
zio->io_prop.zp_copies, zio->io_allocator, zio);
zio->io_flags &= ~ZIO_FLAG_IO_ALLOCATING;
VERIFY(metaslab_class_throttle_reserve(
spa_normal_class(spa),
zio->io_prop.zp_copies, zio->io_allocator, zio,
flags | METASLAB_MUST_RESERVE));
}
zio->io_metaslab_class = mc = spa_normal_class(spa);
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying normal class: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio, zio->io_allocator);
}
if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE) {
if (zfs_flags & ZFS_DEBUG_METASLAB_ALLOC) {
zfs_dbgmsg("%s: metaslab allocation failure, "
"trying ganging: zio %px, size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
return (zio_write_gang_block(zio, mc));
}
if (error != 0) {
if (error != ENOSPC ||
(zfs_flags & ZFS_DEBUG_METASLAB_ALLOC)) {
zfs_dbgmsg("%s: metaslab allocation failure: zio %px, "
"size %llu, error %d",
spa_name(spa), zio, (u_longlong_t)zio->io_size,
error);
}
zio->io_error = error;
}
return (zio);
}
static zio_t *
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
return (zio);
}
static zio_t *
zio_dva_claim(zio_t *zio)
{
int error;
error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
if (error)
zio->io_error = error;
return (zio);
}
/*
* Undo an allocation. This is used by zio_done() when an I/O fails
* and we want to give back the block we just allocated.
* This handles both normal blocks and gang blocks.
*/
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp))
metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
if (gn != NULL) {
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
}
}
/*
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
/*
* Block pointer fields are useful to metaslabs for stats and debugging.
* Fill in the obvious ones before calling into metaslab_alloc().
*/
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_PSIZE(new_bp, size);
BP_SET_LEVEL(new_bp, 0);
/*
* When allocating a zil block, we don't have information about
* the final destination of the block except the objset it's part
* of, so we just hash the objset ID to pick the allocator to get
* some parallelism.
*/
int flags = METASLAB_FASTWRITE | METASLAB_ZIL;
int allocator = (uint_t)cityhash4(0, 0, 0,
os->os_dsl_dataset->ds_object) % spa->spa_alloc_count;
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, flags, &io_alloc_list, NULL, allocator);
*slog = (error == 0);
if (error != 0) {
error = metaslab_alloc(spa, spa_embedded_log_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
if (error != 0) {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, flags,
&io_alloc_list, NULL, allocator);
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
BP_SET_PSIZE(new_bp, size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_LEVEL(new_bp, 0);
BP_SET_DEDUP(new_bp, 0);
BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
/*
* encrypted blocks will require an IV and salt. We generate
* these now since we will not be rewriting the bp at
* rewrite time.
*/
if (os->os_encrypted) {
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t salt[ZIO_DATA_SALT_LEN];
BP_SET_CRYPT(new_bp, B_TRUE);
VERIFY0(spa_crypt_get_salt(spa,
dmu_objset_id(os), salt));
VERIFY0(zio_crypt_generate_iv(iv));
zio_crypt_encode_params_bp(new_bp, salt, iv);
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
"size %llu, error %d", spa_name(spa), (u_longlong_t)size,
error);
}
return (error);
}
/*
* ==========================================================================
* Read and write to physical devices
* ==========================================================================
*/
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
* However, there are instances where the vdev layer may need to
* continue the pipeline when an I/O was not issued. Since the I/O
* that was sent to the vdev layer might be different than the one
* currently active in the pipeline (see vdev_queue_io()), we explicitly
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
static zio_t *
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t align;
spa_t *spa = zio->io_spa;
zio->io_delay = 0;
ASSERT(zio->io_error == 0);
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
return (NULL);
}
ASSERT3P(zio->io_logical, !=, zio);
if (zio->io_type == ZIO_TYPE_WRITE) {
ASSERT(spa->spa_trust_config);
/*
* Note: the code can handle other kinds of writes,
* but we don't expect them.
*/
if (zio->io_vd->vdev_removing) {
ASSERT(zio->io_flags &
(ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
ZIO_FLAG_RESILVER | ZIO_FLAG_INDUCE_DAMAGE));
}
}
align = 1ULL << vd->vdev_top->vdev_ashift;
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
P2PHASE(zio->io_size, align) != 0) {
/* Transform logical writes to be a full physical block size. */
uint64_t asize = P2ROUNDUP(zio->io_size, align);
abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
abd_copy(abuf, zio->io_abd, zio->io_size);
abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
}
/*
* If this is not a physical io, make sure that it is properly aligned
* before proceeding.
*/
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
ASSERT0(P2PHASE(zio->io_offset, align));
ASSERT0(P2PHASE(zio->io_size, align));
} else {
/*
* For physical writes, we allow 512b aligned writes and assume
* the device will perform a read-modify-write as necessary.
*/
ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
}
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
* that is, we're just resilvering what we expect to resilver --
* then don't do the I/O unless zio's txg is actually in vd's DTL.
* This prevents spurious resilvering.
*
* There are a few ways that we can end up creating these spurious
* resilver i/os:
*
* 1. A resilver i/o will be issued if any DVA in the BP has a
* dirty DTL. The mirror code will issue resilver writes to
* each DVA, including the one(s) that are not on vdevs with dirty
* DTLs.
*
* 2. With nested replication, which happens when we have a
* "replacing" or "spare" vdev that's a child of a mirror or raidz.
* For example, given mirror(replacing(A+B), C), it's likely that
* only A is out of date (it's the new device). In this case, we'll
* read from C, then use the data to resilver A+B -- but we don't
* actually want to resilver B, just A. The top-level mirror has no
* way to know this, so instead we just discard unnecessary repairs
* as we work our way down the vdev tree.
*
* 3. ZTEST also creates mirrors of mirrors, mirrors of raidz, etc.
* The same logic applies to any form of nested replication: ditto
* + mirror, RAID-Z + replacing, etc.
*
* However, indirect vdevs point off to other vdevs which may have
* DTL's, so we never bypass them. The child i/os on concrete vdevs
* will be properly bypassed instead.
*
* Leaf DTL_PARTIAL can be empty when a legitimate write comes from
* a dRAID spare vdev. For example, when a dRAID spare is first
* used, its spare blocks need to be written to, but the leaf vdevs
* of such blocks can have an empty DTL_PARTIAL.
*
* There seemed no clean way to allow such writes while bypassing
* spurious ones. At this point, just avoid all bypassing for dRAID
* for correctness.
*/
if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
zio->io_txg != 0 && /* not a delegated i/o */
vd->vdev_ops != &vdev_indirect_ops &&
vd->vdev_top->vdev_ops != &vdev_draid_ops &&
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
return (zio);
}
/*
* Select the next best leaf I/O to process. Distributed spares are
* excluded since they dispatch the I/O directly to a leaf vdev after
* applying the dRAID mapping.
*/
if (vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops &&
(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE ||
zio->io_type == ZIO_TYPE_TRIM)) {
if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
return (zio);
if ((zio = vdev_queue_io(zio)) == NULL)
return (NULL);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (NULL);
}
zio->io_delay = gethrtime();
}
vd->vdev_ops->vdev_op_io_start(zio);
return (NULL);
}
static zio_t *
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
ASSERT(zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_WRITE || zio->io_type == ZIO_TYPE_TRIM);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
vd->vdev_ops != &vdev_draid_spare_ops) {
vdev_queue_io_done(zio);
if (zio->io_type == ZIO_TYPE_WRITE)
vdev_cache_write(zio);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_device_injections(vd, zio,
EIO, EILSEQ);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
if (zio->io_error && zio->io_type != ZIO_TYPE_TRIM) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
}
}
ops->vdev_op_io_done(zio);
if (unexpected_error)
VERIFY(vdev_probe(vd, zio) == NULL);
return (zio);
}
/*
* This function is used to change the priority of an existing zio that is
* currently in-flight. This is used by the arc to upgrade priority in the
* event that a demand read is made for a block that is currently queued
* as a scrub or async read IO. Otherwise, the high priority read request
* would end up having to wait for the lower priority IO.
*/
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
vdev_queue_change_io_priority(pio, priority);
} else {
pio->io_priority = priority;
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_change_priority(cio, priority);
}
mutex_exit(&pio->io_lock);
}
/*
* For non-raidz ZIOs, we can just copy aside the bad data read from the
* disk, and use that to finish the checksum ereport later.
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr)
{
void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
zcr->zcr_free = zio_abd_free;
}
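/*
 * Assess the result of the vdev I/O: retry eligible failures from
 * ZIO_STAGE_VDEV_IO_START, convert errors on inaccessible leaves to
 * ENXIO, and record vdevs that cannot write or do not support cache
 * flushes.
 */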
static zio_t *
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (NULL);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
if (zio->io_vsd != NULL) {
zio->io_vsd_ops->vsd_free(zio);
zio->io_vsd = NULL;
}
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_fault_injection(zio, EIO);
/*
* If the I/O failed, determine whether we should attempt to retry it.
*
* On retry, we cut in line in the issue queue, since we don't want
* compression/checksumming/etc. work to prevent our (cheap) IO reissue.
*/
if (zio->io_error && vd == NULL &&
!(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
zio->io_error = 0;
zio->io_flags |= ZIO_FLAG_IO_RETRY |
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
return (NULL);
}
/*
* If we got an error on a leaf device, convert it to ENXIO
* if the device is not accessible at all.
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),
* set vdev_cant_write so that we stop trying to allocate from it.
*/
if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
vdev_dbgmsg(vd, "zio_vdev_io_assess(zio=%px) setting "
"cant_write=TRUE due to write failure with ENXIO",
zio);
vd->vdev_cant_write = B_TRUE;
}
/*
* If a cache flush returns ENOTSUP or ENOTTY, we know that no future
* attempts will ever succeed. In this case we set a persistent
* boolean flag so that we don't bother with it in the future.
*/
if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
zio->io_type == ZIO_TYPE_IOCTL &&
zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
vd->vdev_nowritecache = B_TRUE;
if (zio->io_error)
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
zio->io_physdone != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
zio->io_physdone(zio->io_logical);
}
return (zio);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_stage >>= 1;
}
void
zio_vdev_io_redone(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
zio->io_stage >>= 1;
}
void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
/*
* ==========================================================================
* Encrypt and store encryption parameters
* ==========================================================================
*/
/*
* This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
static zio_t *
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_GET_PSIZE(bp);
uint64_t dsobj = zio->io_bookmark.zb_objset;
dmu_object_type_t ot = BP_GET_TYPE(bp);
void *enc_buf = NULL;
abd_t *eabd = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
return (zio);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
return (zio);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
return (zio);
}
/* if we are doing raw encryption set the provided encryption params */
if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ASSERT0(BP_GET_LEVEL(bp));
BP_SET_CRYPT(bp, B_TRUE);
BP_SET_BYTEORDER(bp, zp->zp_byteorder);
if (ot != DMU_OT_OBJSET)
zio_crypt_encode_mac_bp(bp, zp->zp_mac);
/* dnode blocks must be written out in the provided byteorder */
if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
ot == DMU_OT_DNODE) {
void *bswap_buf = zio_buf_alloc(psize);
abd_t *babd = abd_get_from_buf(bswap_buf, psize);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
psize);
abd_take_ownership_of_buf(babd, B_TRUE);
zio_push_transform(zio, babd, psize, psize, NULL);
}
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
return (zio);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
if (BP_GET_LEVEL(bp) > 0) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Objset blocks are a special case since they have 2 256-bit MACs
* embedded within them.
*/
if (ot == DMU_OT_OBJSET) {
ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
return (zio);
}
/* unencrypted object types are only authenticated with a MAC */
if (!DMU_OT_IS_ENCRYPTED(ot)) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
return (zio);
}
/*
* Later passes of sync-to-convergence may decide to rewrite data
* in place to avoid more disk reallocations. This presents a problem
* for encryption because this constitutes rewriting the new data with
* the same encryption key and IV. However, this only applies to blocks
* in the MOS (particularly the spacemaps) and we do not encrypt the
* MOS. We assert that the zio is allocating or an intent log write
* to enforce this.
*/
ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
ASSERT3U(psize, !=, 0);
enc_buf = zio_buf_alloc(psize);
eabd = abd_get_from_buf(enc_buf, psize);
abd_take_ownership_of_buf(eabd, B_TRUE);
/*
* For an explanation of what encryption parameters are stored
* where, see the block comment in zio_crypt.c.
*/
if (ot == DMU_OT_INTENT_LOG) {
zio_crypt_decode_params_bp(bp, salt, iv);
} else {
BP_SET_CRYPT(bp, B_TRUE);
}
/* Perform the encryption. This should not fail */
VERIFY0(spa_do_crypt_abd(B_TRUE, spa, &zio->io_bookmark,
BP_GET_TYPE(bp), BP_GET_DEDUP(bp), BP_SHOULD_BYTESWAP(bp),
salt, iv, mac, psize, zio->io_abd, eabd, &no_crypt));
/* encode encryption metadata into the bp */
if (ot == DMU_OT_INTENT_LOG) {
/*
* ZIL blocks store the MAC in the embedded checksum, so the
* transform must always be applied.
*/
zio_crypt_encode_mac_zil(enc_buf, mac);
zio_push_transform(zio, eabd, psize, psize, NULL);
} else {
BP_SET_CRYPT(bp, B_TRUE);
zio_crypt_encode_params_bp(bp, salt, iv);
zio_crypt_encode_mac_bp(bp, mac);
if (no_crypt) {
ASSERT3U(ot, ==, DMU_OT_DNODE);
abd_free(eabd);
} else {
zio_push_transform(zio, eabd, psize, psize, NULL);
}
}
return (zio);
}
/*
* ==========================================================================
* Generate and verify checksums
* ==========================================================================
*/
static zio_t *
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum;
if (bp == NULL) {
/*
* This is zio_write_phys().
* We're either generating a label checksum, or none at all.
*/
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
ASSERT(!IO_IS_ALLOCATING(zio));
checksum = ZIO_CHECKSUM_GANG_HEADER;
} else {
checksum = BP_GET_CHECKSUM(bp);
}
}
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
return (zio);
}
static zio_t *
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
blkptr_t *bp = zio->io_bp;
int error;
ASSERT(zio->io_vd != NULL);
if (bp == NULL) {
/*
* This is zio_read_phys().
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
return (zio);
ASSERT3U(zio->io_prop.zp_checksum, ==, ZIO_CHECKSUM_LABEL);
}
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
(void) zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio,
zio->io_offset, zio->io_size, &info);
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
return (zio);
}
/*
* Called by RAID-Z to ensure we don't compute the checksum twice.
*/
void
zio_checksum_verified(zio_t *zio)
{
zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
* ==========================================================================
* Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
* An error of 0 indicates success. ENXIO indicates whole-device failure,
* which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
* indicate errors that are specific to one I/O, and most likely permanent.
* Any other error is presumed to be worse because we weren't expecting it.
* ==========================================================================
*/
int
zio_worst_error(int e1, int e2)
{
static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
int r1, r2;
for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
if (e1 == zio_error_rank[r1])
break;
for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
if (e2 == zio_error_rank[r2])
break;
return (r1 > r2 ? e1 : e2);
}
/*
* ==========================================================================
* I/O completion
* ==========================================================================
*/
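/*
 * READY stage: once gang and DDT children are ready, invoke the io_ready
 * callback, release any unused throttle reservation on error, and notify
 * waiting parents that this zio is ready.
 */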
static zio_t *
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
ZIO_WAIT_READY)) {
return (NULL);
}
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
zio->io_ready(zio);
}
if (bp != NULL && bp != &zio->io_bp_copy)
zio->io_bp_copy = *bp;
if (zio->io_error != 0) {
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_metaslab_class != NULL);
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
zio->io_metaslab_class, zio->io_prop.zp_copies,
zio->io_allocator, zio);
zio_allocate_dispatch(zio->io_spa, zio->io_allocator);
}
}
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_READY] = 1;
pio = zio_walk_parents(zio, &zl);
mutex_exit(&zio->io_lock);
/*
* As we notify zio's parents, new parents could be added.
* New parents go to the head of zio's io_parent_list, however,
* so we will (correctly) not notify them. The remainder of zio's
* io_parent_list, from 'pio_next' onward, cannot change because
* all parents must wait for us to be done before they can be done.
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
zio_notify_parent(pio, zio, ZIO_WAIT_READY, NULL);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
if (BP_IS_GANG(bp)) {
zio->io_flags &= ~ZIO_FLAG_NODATA;
} else {
ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
}
if (zio_injection_enabled &&
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
return (zio);
}
/*
* Update the allocation throttle accounting.
*/
static void
zio_dva_throttle_done(zio_t *zio)
{
zio_t *lio __maybe_unused = zio->io_logical;
zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC;
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT(vd != NULL);
ASSERT3P(vd, ==, vd->vdev_top);
ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
/*
* Parents of gang children can have two flavors -- ones that
* allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
* and ones that allocated the constituent blocks. The allocation
* throttle needs to know the allocating parent zio so we must find
* it here.
*/
if (pio->io_child_type == ZIO_CHILD_GANG) {
/*
* If our parent is a rewrite gang child then our grandparent
* would have been the one that performed the allocation.
*/
if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
pio = zio_unique_parent(pio);
flags |= METASLAB_GANG_CHILD;
}
ASSERT(IO_IS_ALLOCATING(pio));
ASSERT3P(zio, !=, zio->io_logical);
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
ASSERT(zio->io_metaslab_class != NULL);
mutex_enter(&pio->io_lock);
metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags,
pio->io_allocator, B_TRUE);
mutex_exit(&pio->io_lock);
metaslab_class_throttle_unreserve(zio->io_metaslab_class, 1,
pio->io_allocator, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
zio_allocate_dispatch(zio->io_spa, pio->io_allocator);
}
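/*
 * Final pipeline stage: settle allocation throttle accounting, inherit
 * child errors, post ereports, decide whether to reexecute or suspend,
 * and finally notify parents or destroy the zio.
 */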
static zio_t *
zio_done(zio_t *zio)
{
/*
* Always attempt to keep stack usage minimal here since
* we can be called recursively up to 19 levels deep.
*/
const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
/*
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (NULL);
}
/*
* If the allocation throttle is enabled, then update the accounting.
* We only track child I/Os that are part of an allocating async
* write. We must do this since the allocation is performed
* by the logical I/O but the actual write is done by child I/Os.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
ASSERT(zio->io_metaslab_class != NULL);
ASSERT(zio->io_metaslab_class->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
/*
* If the allocation throttle is enabled, verify that
* we have decremented the refcounts for every I/O that was throttled.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio,
zio->io_allocator);
VERIFY(zfs_refcount_not_held(&zio->io_metaslab_class->
mc_allocator[zio->io_allocator].mca_alloc_slots, zio));
}
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
}
/*
* If there were child vdev/gang/ddt errors, they apply to us now.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
/*
* If the I/O on the transformed data was successful, generate any
* checksum reports now while we still have the transformed data.
*/
if (zio->io_error == 0) {
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
abd_t *adata = zio->io_abd;
if (adata != NULL && asize != psize) {
adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
if (adata != NULL && asize != psize)
abd_free(adata);
}
}
zio_pop_transforms(zio); /* note: may set zio->io_error */
vdev_stat_update(zio, psize);
/*
* If this I/O is attached to a particular vdev and is slow, exceeding
* zio_slow_io_ms (30 seconds by default) to complete, post an error
* describing the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
if (zio->io_delay >= MSEC2NSEC(zio_slow_io_ms)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd)) {
/*
* We want to only increment our slow IO counters if
* the IO is valid (i.e. not if the drive is removed).
*
* zfs_ereport_post() will also do these checks, but
* it can also ratelimit and have other failures, so we
* need to increment the slow_io counters independent
* of it.
*/
if (zfs_ereport_is_valid(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, zio)) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
zio->io_vd->vdev_stat.vs_slow_ios++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DELAY,
zio->io_spa, zio->io_vd, &zio->io_bookmark,
zio, 0);
}
}
}
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
* generate an error message describing the I/O failure
* at the block level. We ignore these errors if the
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd)) {
int ret = zfs_ereport_post(FM_EREPORT_ZFS_IO,
zio->io_spa, zio->io_vd, &zio->io_bookmark, zio, 0);
if (ret != EALREADY) {
mutex_enter(&zio->io_vd->vdev_stat_lock);
if (zio->io_type == ZIO_TYPE_READ)
zio->io_vd->vdev_stat.vs_read_errors++;
else if (zio->io_type == ZIO_TYPE_WRITE)
zio->io_vd->vdev_stat.vs_write_errors++;
mutex_exit(&zio->io_vd->vdev_stat_lock);
}
}
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
zio == zio->io_logical) {
/*
* For logical I/O requests, tell the SPA to log the
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, &zio->io_bookmark);
(void) zfs_ereport_post(FM_EREPORT_ZFS_DATA,
zio->io_spa, NULL, &zio->io_bookmark, zio, 0);
}
}
if (zio->io_error && zio == zio->io_logical) {
/*
* Determine whether zio should be reexecuted. This will
* propagate all the way to the root via zio_notify_parent().
*/
ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (IO_IS_ALLOCATING(zio) &&
!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
if (zio->io_error != ENOSPC)
zio->io_reexecute |= ZIO_REEXECUTE_NOW;
else
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
}
if ((zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_FREE) &&
!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
zio->io_error == ENXIO &&
spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
/*
* Here is a possibly good place to attempt to do
* either combinatorial reconstruction or error correction
* based on checksums. It also might be a good place
* to send out preliminary ereports before we suspend
* processing.
*/
}
/*
* If there were logical child errors, they apply to us now.
* We defer this until now to avoid conflating logical child
* errors with errors that happened to the zio itself when
* updating vdev stats and reporting FMA events above.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
if ((zio->io_error || zio->io_reexecute) &&
IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
!(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
zio_gang_tree_free(&zio->io_gang_tree);
/*
* Godfather I/Os should never suspend.
*/
if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
if (zio->io_reexecute) {
/*
* This is a logical I/O that wants to reexecute.
*
* Reexecute is top-down. When an i/o fails, if it's not
* the root, it simply notifies its parent and sticks around.
* The parent, seeing that it still has children in zio_done(),
* does the same. This percolates all the way up to the root.
* The root i/o will reexecute or suspend the entire tree.
*
* This approach ensures that zio_reexecute() honors
* all the original i/o dependency relationships, e.g.
* parents not executing until children are ready.
*/
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
zio->io_gang_leader = NULL;
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* "The Godfather" I/O monitors its children but is
* not a true parent to them. It will track them through
* the pipeline but severs its ties whenever they get into
* trouble (e.g. suspended). This allows "The Godfather"
* I/O to return status without blocking.
*/
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL;
pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
/*
* This is a rare code path, so we don't
* bother with "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE,
NULL);
}
}
if ((pio = zio_unique_parent(zio)) != NULL) {
/*
* We're not a root i/o, so there's nothing to do
* but notify our parent. Don't propagate errors
* upward since we haven't permanently failed yet.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
/*
* This is a rare code path, so we don't bother with
* "next_to_execute".
*/
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, NULL);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(zio->io_spa,
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
zio_reexecute, zio, 0, &zio->io_tqent);
}
return (NULL);
}
ASSERT(zio->io_child_count == 0);
ASSERT(zio->io_reexecute == 0);
ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
/*
* Report any checksum errors, since the I/O is complete.
*/
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, NULL);
zfs_ereport_free_checksum(zcr);
}
if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
!BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) &&
!(zio->io_flags & ZIO_FLAG_NOPWRITE)) {
metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
}
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
* such, cannot acquire any new parents.
*/
if (zio->io_done)
zio->io_done(zio);
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* We are done executing this zio. We may want to execute a parent
* next. See the comment in zio_notify_parent().
*/
zio_t *next_to_execute = NULL;
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE, &next_to_execute);
}
if (zio->io_waiter != NULL) {
mutex_enter(&zio->io_lock);
zio->io_executor = NULL;
cv_broadcast(&zio->io_cv);
mutex_exit(&zio->io_lock);
} else {
zio_destroy(zio);
}
return (next_to_execute);
}
/*
* ==========================================================================
* I/O pipeline definition
* ==========================================================================
*/
static zio_pipe_stage_t *zio_pipeline[] = {
NULL,
zio_read_bp_init,
zio_write_bp_init,
zio_free_bp_init,
zio_issue_async,
zio_write_compress,
zio_encrypt,
zio_checksum_generate,
zio_nop_write,
zio_ddt_read_start,
zio_ddt_read_done,
zio_ddt_write,
zio_ddt_free,
zio_gang_assemble,
zio_gang_issue,
zio_dva_throttle,
zio_dva_allocate,
zio_dva_free,
zio_dva_claim,
zio_ready,
zio_vdev_io_start,
zio_vdev_io_done,
zio_vdev_io_assess,
zio_checksum_verify,
zio_done
};
/*
* Compare two zbookmark_phys_t's to see which we would reach first in a
* pre-order traversal of the object tree.
*
* This is simple in every case aside from the meta-dnode object. For all other
* objects, we traverse them in order (object 1 before object 2, and so on).
* However, all of these objects are traversed while traversing object 0, since
* the data it points to is the list of objects. Thus, we need to convert to a
* canonical representation so we can compare meta-dnode bookmarks to
* non-meta-dnode bookmarks.
*
* We do this by calculating "equivalents" for each field of the zbookmark.
* zbookmarks outside of the meta-dnode use their own object and level, and
* calculate the level 0 equivalent (the first L0 blkid that is contained in the
* blocks this bookmark refers to) by multiplying their blkid by their span
* (the number of L0 blocks contained within one block at their level).
* zbookmarks inside the meta-dnode calculate their object equivalent
* (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
* level + 1<<31 (any value larger than a level could ever be) for their level.
* This causes them to always compare before a bookmark in their object
* equivalent, compare appropriately to bookmarks in other objects, and to
* compare appropriately to other bookmarks in the meta-dnode.
*/
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
/*
* These variables represent the "equivalent" values for the zbookmark,
* after converting zbookmarks inside the meta dnode to their
* normal-object equivalents.
*/
uint64_t zb1obj, zb2obj;
uint64_t zb1L0, zb2L0;
uint64_t zb1level, zb2level;
if (zb1->zb_object == zb2->zb_object &&
zb1->zb_level == zb2->zb_level &&
zb1->zb_blkid == zb2->zb_blkid)
return (0);
IMPLY(zb1->zb_level > 0, ibs1 >= SPA_MINBLOCKSHIFT);
IMPLY(zb2->zb_level > 0, ibs2 >= SPA_MINBLOCKSHIFT);
/*
* BP_SPANB calculates the span in blocks.
*/
zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb1L0 = 0;
zb1level = zb1->zb_level + COMPARE_META_LEVEL;
} else {
zb1obj = zb1->zb_object;
zb1level = zb1->zb_level;
}
if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb2L0 = 0;
zb2level = zb2->zb_level + COMPARE_META_LEVEL;
} else {
zb2obj = zb2->zb_object;
zb2level = zb2->zb_level;
}
/* Now that we have a canonical representation, do the comparison. */
if (zb1obj != zb2obj)
return (zb1obj < zb2obj ? -1 : 1);
else if (zb1L0 != zb2L0)
return (zb1L0 < zb2L0 ? -1 : 1);
else if (zb1level != zb2level)
return (zb1level > zb2level ? -1 : 1);
/*
* This can (theoretically) happen if the bookmarks have the same object
* and level, but different blkids, if the block sizes are not the same.
* There is presently no way to change the indirect block sizes.
*/
return (0);
}
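/*
 * For illustration, with hypothetical sizes: suppose the meta-dnode uses
 * 16 KiB data blocks, so dbss1 is 32 sectors, which with the standard
 * 512-byte dnode size is also 32 dnodes per block. A level-0 meta-dnode
 * bookmark with blkid 4 then canonicalizes to zb1obj = 4 * 32 = 128,
 * zb1L0 = 0, and zb1level = COMPARE_META_LEVEL. A bookmark inside object
 * 128 keeps its own (much smaller) level, so the meta-dnode bookmark
 * compares before it, matching a pre-order traversal in which a dnode
 * block is visited before the data it describes.
 */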
/*
* This function checks the following: given that last_block is the place that
* our traversal stopped last time, does that guarantee that we've visited
* every node under subtree_root? The raw output of zbookmark_compare cannot
* answer that directly, so we have to pass in a modified version of
* subtree_root; by incrementing the block id, and then checking whether
* last_block is before or equal to that, we can tell whether or not having
* visited last_block implies that all of subtree_root's children have been
* visited.
*/
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
zbookmark_phys_t mod_zb = *subtree_root;
mod_zb.zb_blkid++;
ASSERT(last_block->zb_level == 0);
/* The objset_phys_t isn't before anything. */
if (dnp == NULL)
return (B_FALSE);
/*
* We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
* data block size in sectors, because that variable is only used if
* the bookmark refers to a block in the meta-dnode. Since we don't
* know without examining it what object it refers to, and there's no
* harm in passing in this value in other cases, we always pass it in.
*
* We pass in 0 for the indirect block size shift because zb2 must be
* level 0. The indirect block size is only used to calculate the span
* of the bookmark, but since the bookmark must be level 0, the span is
* always 1, so the math works out.
*
* If you make changes to how the zbookmark_compare code works, make sure
* that this code still works afterwards.
*/
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
last_block) <= 0);
}
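/*
 * For example, if subtree_root is a level-1 bookmark with blkid 3, mod_zb
 * becomes the level-1 bookmark with blkid 4. If last_block (which is
 * always level 0) is at or beyond the first L0 block spanned by blkid 4,
 * then every L0 block under blkid 3 has already been visited and the
 * subtree is complete; otherwise some child of subtree_root may still be
 * outstanding.
 */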
EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_zio, zio_, slow_io_ms, INT, ZMOD_RW,
"Max I/O completion time (milliseconds) before marking it as slow");
ZFS_MODULE_PARAM(zfs_zio, zio_, requeue_io_start_cut_in_line, INT, ZMOD_RW,
"Prioritize requeued I/O");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_deferred_free, INT, ZMOD_RW,
"Defer frees starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_dont_compress, INT, ZMOD_RW,
"Don't compress starting in this pass");
ZFS_MODULE_PARAM(zfs, zfs_, sync_pass_rewrite, INT, ZMOD_RW,
"Rewrite new bps starting in this pass");
ZFS_MODULE_PARAM(zfs_zio, zio_, dva_throttle_enabled, INT, ZMOD_RW,
"Throttle block allocations in the ZIO pipeline");
ZFS_MODULE_PARAM(zfs_zio, zio_, deadman_log_all, INT, ZMOD_RW,
"Log all slow ZIOs, not just those with vdevs");
/* END CSTYLED */
diff --git a/sys/contrib/openzfs/module/zfs/zio_compress.c b/sys/contrib/openzfs/module/zfs/zio_compress.c
index 33602bd471f3..1ff1e76d7f22 100644
--- a/sys/contrib/openzfs/module/zfs/zio_compress.c
+++ b/sys/contrib/openzfs/module/zfs/zio_compress.c
@@ -1,220 +1,220 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
/*
* Copyright (c) 2013, 2018 by Delphix. All rights reserved.
* Copyright (c) 2019, Klara Inc.
* Copyright (c) 2019, Allan Jude
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/zfeature.h>
#include <sys/zio.h>
#include <sys/zio_compress.h>
#include <sys/zstd/zstd.h>
/*
* If nonzero, roughly one in every X decompression attempts will fail,
* simulating an undetected memory error.
*/
unsigned long zio_decompress_fail_fraction = 0;
/*
* Compression vectors.
*/
zio_compress_info_t zio_compress_table[ZIO_COMPRESS_FUNCTIONS] = {
{"inherit", 0, NULL, NULL, NULL},
{"on", 0, NULL, NULL, NULL},
{"uncompressed", 0, NULL, NULL, NULL},
{"lzjb", 0, lzjb_compress, lzjb_decompress, NULL},
{"empty", 0, NULL, NULL, NULL},
{"gzip-1", 1, gzip_compress, gzip_decompress, NULL},
{"gzip-2", 2, gzip_compress, gzip_decompress, NULL},
{"gzip-3", 3, gzip_compress, gzip_decompress, NULL},
{"gzip-4", 4, gzip_compress, gzip_decompress, NULL},
{"gzip-5", 5, gzip_compress, gzip_decompress, NULL},
{"gzip-6", 6, gzip_compress, gzip_decompress, NULL},
{"gzip-7", 7, gzip_compress, gzip_decompress, NULL},
{"gzip-8", 8, gzip_compress, gzip_decompress, NULL},
{"gzip-9", 9, gzip_compress, gzip_decompress, NULL},
{"zle", 64, zle_compress, zle_decompress, NULL},
{"lz4", 0, lz4_compress_zfs, lz4_decompress_zfs, NULL},
{"zstd", ZIO_ZSTD_LEVEL_DEFAULT, zfs_zstd_compress,
zfs_zstd_decompress, zfs_zstd_decompress_level},
};
uint8_t
zio_complevel_select(spa_t *spa, enum zio_compress compress, uint8_t child,
uint8_t parent)
{
uint8_t result;
if (!ZIO_COMPRESS_HASLEVEL(compress))
return (0);
result = child;
if (result == ZIO_COMPLEVEL_INHERIT)
result = parent;
return (result);
}
enum zio_compress
zio_compress_select(spa_t *spa, enum zio_compress child,
enum zio_compress parent)
{
enum zio_compress result;
ASSERT(child < ZIO_COMPRESS_FUNCTIONS);
ASSERT(parent < ZIO_COMPRESS_FUNCTIONS);
ASSERT(parent != ZIO_COMPRESS_INHERIT);
result = child;
if (result == ZIO_COMPRESS_INHERIT)
result = parent;
if (result == ZIO_COMPRESS_ON) {
if (spa_feature_is_active(spa, SPA_FEATURE_LZ4_COMPRESS))
result = ZIO_COMPRESS_LZ4_ON_VALUE;
else
result = ZIO_COMPRESS_LEGACY_ON_VALUE;
}
return (result);
}
/*ARGSUSED*/
static int
zio_compress_zeroed_cb(void *data, size_t len, void *private)
{
uint64_t *end = (uint64_t *)((char *)data + len);
for (uint64_t *word = (uint64_t *)data; word < end; word++)
if (*word != 0)
return (1);
return (0);
}
size_t
zio_compress_data(enum zio_compress c, abd_t *src, void *dst, size_t s_len,
uint8_t level)
{
size_t c_len, d_len;
uint8_t complevel;
zio_compress_info_t *ci = &zio_compress_table[c];
ASSERT((uint_t)c < ZIO_COMPRESS_FUNCTIONS);
ASSERT((uint_t)c == ZIO_COMPRESS_EMPTY || ci->ci_compress != NULL);
/*
* If the data is all zeroes, we don't even need to allocate
* a block for it. We indicate this by returning zero size.
*/
if (abd_iterate_func(src, 0, s_len, zio_compress_zeroed_cb, NULL) == 0)
return (0);
if (c == ZIO_COMPRESS_EMPTY)
return (s_len);
/* Compress at least 12.5% */
d_len = s_len - (s_len >> 3);
complevel = ci->ci_level;
if (c == ZIO_COMPRESS_ZSTD) {
/* If we don't know the level, we can't compress it */
if (level == ZIO_COMPLEVEL_INHERIT)
return (s_len);
if (level == ZIO_COMPLEVEL_DEFAULT)
complevel = ZIO_ZSTD_LEVEL_DEFAULT;
else
complevel = level;
ASSERT3U(complevel, !=, ZIO_COMPLEVEL_INHERIT);
}
/* No compression algorithms can read from ABDs directly */
void *tmp = abd_borrow_buf_copy(src, s_len);
c_len = ci->ci_compress(tmp, dst, s_len, d_len, complevel);
abd_return_buf(src, tmp, s_len);
if (c_len > d_len)
return (s_len);
ASSERT3U(c_len, <=, d_len);
return (c_len);
}
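/*
 * Worked example of the 12.5% rule above: for a 128 KiB (131072-byte)
 * source buffer, d_len = 131072 - (131072 >> 3) = 114688. Unless the
 * compressor fits the data into 114688 bytes or less (a saving of at
 * least 16384 bytes), c_len exceeds d_len and s_len is returned, telling
 * the caller to store the block uncompressed.
 */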
int
zio_decompress_data_buf(enum zio_compress c, void *src, void *dst,
size_t s_len, size_t d_len, uint8_t *level)
{
zio_compress_info_t *ci = &zio_compress_table[c];
if ((uint_t)c >= ZIO_COMPRESS_FUNCTIONS || ci->ci_decompress == NULL)
return (SET_ERROR(EINVAL));
if (ci->ci_decompress_level != NULL && level != NULL)
return (ci->ci_decompress_level(src, dst, s_len, d_len, level));
return (ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level));
}
int
zio_decompress_data(enum zio_compress c, abd_t *src, void *dst,
size_t s_len, size_t d_len, uint8_t *level)
{
void *tmp = abd_borrow_buf_copy(src, s_len);
int ret = zio_decompress_data_buf(c, tmp, dst, s_len, d_len, level);
abd_return_buf(src, tmp, s_len);
/*
* Decompression shouldn't fail, because we've already verified
* the checksum. However, for extra protection (e.g. against bitflips
* in non-ECC RAM), we handle this error (and test it).
*/
if (zio_decompress_fail_fraction != 0 &&
random_in_range(zio_decompress_fail_fraction) == 0)
ret = SET_ERROR(EINVAL);
return (ret);
}
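/*
 * The zio_decompress_fail_fraction tunable above exists purely for
 * testing: for example, a value of 1000 makes roughly one in a thousand
 * decompressions report EINVAL even though the data is intact, which
 * exercises the error-handling path without requiring real corruption.
 */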
int
zio_compress_to_feature(enum zio_compress comp)
{
switch (comp) {
case ZIO_COMPRESS_ZSTD:
return (SPA_FEATURE_ZSTD_COMPRESS);
default:
- /* fallthru */;
+ break;
}
return (SPA_FEATURE_NONE);
}
diff --git a/sys/contrib/openzfs/module/zfs/zvol.c b/sys/contrib/openzfs/module/zfs/zvol.c
index b7bc587cf624..5438e8f928d1 100644
--- a/sys/contrib/openzfs/module/zfs/zvol.c
+++ b/sys/contrib/openzfs/module/zfs/zvol.c
@@ -1,1753 +1,1817 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
*
* ZFS volume emulation driver.
*
* Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
* Volumes are accessed through the symbolic links named:
*
* /dev/<pool_name>/<dataset_name>
*
* Volumes are persistent through reboot and module load. No user command
* needs to be run before opening and using a device.
*
* Copyright 2014 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2012, 2019 by Delphix. All rights reserved.
*/
/*
* Note on locking of zvol state structures.
*
* These structures are used to maintain internal state used to emulate block
* devices on top of zvols. In particular, management of device minor number
* operations - create, remove, rename, and set_snapdev - involves access to
* these structures. The zvol_state_lock is primarily used to protect the
* zvol_state_list. The zv->zv_state_lock is used to protect the contents
* of the zvol_state_t structures, as well as to make sure that when the
* time comes to remove the structure from the list, it is not in use, and
* therefore, it can be taken off zvol_state_list and freed.
*
* The zv_suspend_lock was introduced to allow for suspending I/O to a zvol,
* e.g. for the duration of receive and rollback operations. This lock can be
* held for significant periods of time. Given that it is undesirable to hold
* mutexes for long periods of time, the following lock ordering applies:
* - take zvol_state_lock if necessary, to protect zvol_state_list
* - take zv_suspend_lock if necessary, by the code path in question
* - take zv_state_lock to protect zvol_state_t
*
* The minor operations are issued to spa->spa_zvol_taskq queues, which are
* single-threaded (to preserve the order of minor operations), and are executed
* through the zvol_task_cb that dispatches the specific operations. Therefore,
* these operations are serialized per pool. Consequently, we can be certain
* that for a given zvol, there is only one operation at a time in progress.
* That is why one can be sure that the zvol_state_t for a given zvol is first
* allocated and placed on zvol_state_list, and that other minor operations
* for this zvol then proceed in the order in which they were issued.
*
*/
#include <sys/dataset_kstats.h>
#include <sys/dbuf.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dmu_tx.h>
#include <sys/zio.h>
#include <sys/zfs_rlock.h>
#include <sys/spa_impl.h>
#include <sys/zvol.h>
#include <sys/zvol_impl.h>
unsigned int zvol_inhibit_dev = 0;
unsigned int zvol_volmode = ZFS_VOLMODE_GEOM;
struct hlist_head *zvol_htable;
list_t zvol_state_list;
krwlock_t zvol_state_lock;
const zvol_platform_ops_t *ops;
typedef enum {
ZVOL_ASYNC_REMOVE_MINORS,
ZVOL_ASYNC_RENAME_MINORS,
ZVOL_ASYNC_SET_SNAPDEV,
ZVOL_ASYNC_SET_VOLMODE,
ZVOL_ASYNC_MAX
} zvol_async_op_t;
typedef struct {
zvol_async_op_t op;
char name1[MAXNAMELEN];
char name2[MAXNAMELEN];
uint64_t value;
} zvol_task_t;
uint64_t
zvol_name_hash(const char *name)
{
int i;
uint64_t crc = -1ULL;
const uint8_t *p = (const uint8_t *)name;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
for (i = 0; i < MAXNAMELEN - 1 && *p; i++, p++) {
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (*p)) & 0xFF];
}
return (crc);
}
/*
* Find a zvol_state_t given the name and hash generated by zvol_name_hash.
* If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
* return (NULL) without taking the locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (including none)
* for zv_suspend_lock to be taken.
*/
zvol_state_t *
zvol_find_by_name_hash(const char *name, uint64_t hash, int mode)
{
zvol_state_t *zv;
struct hlist_node *p = NULL;
rw_enter(&zvol_state_lock, RW_READER);
hlist_for_each(p, ZVOL_HT_HEAD(hash)) {
zv = hlist_entry(p, zvol_state_t, zv_hlink);
mutex_enter(&zv->zv_state_lock);
if (zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN) == 0) {
/*
* this is the right zvol, take the locks in the
* right order
*/
if (mode != RW_NONE &&
!rw_tryenter(&zv->zv_suspend_lock, mode)) {
mutex_exit(&zv->zv_state_lock);
rw_enter(&zv->zv_suspend_lock, mode);
mutex_enter(&zv->zv_state_lock);
/*
* zvol cannot be renamed as we continue
* to hold zvol_state_lock
*/
ASSERT(zv->zv_hash == hash &&
strncmp(zv->zv_name, name, MAXNAMELEN)
== 0);
}
rw_exit(&zvol_state_lock);
return (zv);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
return (NULL);
}
/*
* Find a zvol_state_t given the name.
* If found, return with zv_suspend_lock and zv_state_lock taken, otherwise,
* return (NULL) without taking the locks. The zv_suspend_lock is always taken
* before zv_state_lock. The mode argument indicates the mode (including none)
* for zv_suspend_lock to be taken.
*/
static zvol_state_t *
zvol_find_by_name(const char *name, int mode)
{
return (zvol_find_by_name_hash(name, zvol_name_hash(name), mode));
}
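/*
 * A minimal usage sketch of the lookup protocol above (the dataset name is
 * only an example): the caller receives zv with zv_suspend_lock held in the
 * requested mode and zv_state_lock held, and must drop them itself:
 *
 *	zvol_state_t *zv = zvol_find_by_name_hash("pool/vol",
 *	    zvol_name_hash("pool/vol"), RW_READER);
 *	if (zv != NULL) {
 *		... use zv ...
 *		mutex_exit(&zv->zv_state_lock);
 *		rw_exit(&zv->zv_suspend_lock);
 *	}
 */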
/*
* ZFS_IOC_CREATE callback handles dmu zvol and zap object creation.
*/
void
zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
zfs_creat_t *zct = arg;
nvlist_t *nvprops = zct->zct_props;
int error;
uint64_t volblocksize, volsize;
VERIFY(nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
/*
* These properties must be removed from the list so the generic
* property setting step won't apply to them.
*/
VERIFY(nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
(void) nvlist_remove_all(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
DMU_OT_NONE, 0, tx);
ASSERT(error == 0);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
ASSERT(error == 0);
}
/*
* ZFS_IOC_OBJSET_STATS entry point.
*/
int
zvol_get_stats(objset_t *os, nvlist_t *nv)
{
int error;
dmu_object_info_t *doi;
uint64_t val;
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
if (error)
return (SET_ERROR(error));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
doi = kmem_alloc(sizeof (dmu_object_info_t), KM_SLEEP);
error = dmu_object_info(os, ZVOL_OBJ, doi);
if (error == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
doi->doi_data_block_size);
}
kmem_free(doi, sizeof (dmu_object_info_t));
return (SET_ERROR(error));
}
/*
* Sanity check volume size.
*/
int
zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
{
if (volsize == 0)
return (SET_ERROR(EINVAL));
if (volsize % blocksize != 0)
return (SET_ERROR(EINVAL));
#ifdef _ILP32
if (volsize - 1 > SPEC_MAXOFFSET_T)
return (SET_ERROR(EOVERFLOW));
#endif
return (0);
}
/*
* Ensure the zap is flushed then inform the VFS of the capacity change.
*/
static int
zvol_update_volsize(uint64_t volsize, objset_t *os)
{
dmu_tx_t *tx;
int error;
uint64_t txg;
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
return (SET_ERROR(error));
}
txg = dmu_tx_get_txg(tx);
error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
&volsize, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
if (error == 0)
error = dmu_free_long_range(os,
ZVOL_OBJ, volsize, DMU_OBJECT_END);
return (error);
}
/*
* ZFS_PROP_VOLSIZE set entry point. Note that modifying the volume
* size will result in a udev "change" event being generated.
*/
int
zvol_set_volsize(const char *name, uint64_t volsize)
{
objset_t *os = NULL;
uint64_t readonly;
int error;
boolean_t owned = B_FALSE;
error = dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
if (error != 0)
return (SET_ERROR(error));
if (readonly)
return (SET_ERROR(EROFS));
zvol_state_t *zv = zvol_find_by_name(name, RW_READER);
ASSERT(zv == NULL || (MUTEX_HELD(&zv->zv_state_lock) &&
RW_READ_HELD(&zv->zv_suspend_lock)));
if (zv == NULL || zv->zv_objset == NULL) {
if (zv != NULL)
rw_exit(&zv->zv_suspend_lock);
if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE, B_TRUE,
FTAG, &os)) != 0) {
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
return (SET_ERROR(error));
}
owned = B_TRUE;
if (zv != NULL)
zv->zv_objset = os;
} else {
os = zv->zv_objset;
}
dmu_object_info_t *doi = kmem_alloc(sizeof (*doi), KM_SLEEP);
if ((error = dmu_object_info(os, ZVOL_OBJ, doi)) ||
(error = zvol_check_volsize(volsize, doi->doi_data_block_size)))
goto out;
error = zvol_update_volsize(volsize, os);
if (error == 0 && zv != NULL) {
zv->zv_volsize = volsize;
zv->zv_changed = 1;
}
out:
kmem_free(doi, sizeof (dmu_object_info_t));
if (owned) {
dmu_objset_disown(os, B_TRUE, FTAG);
if (zv != NULL)
zv->zv_objset = NULL;
} else {
rw_exit(&zv->zv_suspend_lock);
}
if (zv != NULL)
mutex_exit(&zv->zv_state_lock);
if (error == 0 && zv != NULL)
ops->zv_update_volsize(zv, volsize);
return (SET_ERROR(error));
}
/*
* Sanity check volume block size.
*/
int
zvol_check_volblocksize(const char *name, uint64_t volblocksize)
{
/* Record sizes above 128k need the feature to be enabled */
if (volblocksize > SPA_OLD_MAXBLOCKSIZE) {
spa_t *spa;
int error;
if ((error = spa_open(name, &spa, FTAG)) != 0)
return (error);
if (!spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
/*
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
if (volblocksize > zfs_max_recordsize)
return (SET_ERROR(EDOM));
spa_close(spa, FTAG);
}
if (volblocksize < SPA_MINBLOCKSIZE ||
volblocksize > SPA_MAXBLOCKSIZE ||
!ISP2(volblocksize))
return (SET_ERROR(EDOM));
return (0);
}
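/*
 * A sketch of how the checks above combine, assuming default tunables:
 * on a pool with the large_blocks feature enabled, a 256 KiB request is
 * accepted, a 192 KiB request fails the ISP2() check with EDOM, and a
 * 2 MiB request fails with EDOM because it exceeds the default 1 MiB
 * zfs_max_recordsize cap. Without the feature, any request above
 * SPA_OLD_MAXBLOCKSIZE (128 KiB) fails with ENOTSUP.
 */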
/*
* ZFS_PROP_VOLBLOCKSIZE set entry point.
*/
int
zvol_set_volblocksize(const char *name, uint64_t volblocksize)
{
zvol_state_t *zv;
dmu_tx_t *tx;
int error;
zv = zvol_find_by_name(name, RW_READER);
if (zv == NULL)
return (SET_ERROR(ENXIO));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
if (zv->zv_flags & ZVOL_RDONLY) {
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
return (SET_ERROR(EROFS));
}
tx = dmu_tx_create(zv->zv_objset);
dmu_tx_hold_bonus(tx, ZVOL_OBJ);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
error = dmu_object_set_blocksize(zv->zv_objset, ZVOL_OBJ,
volblocksize, 0, tx);
if (error == ENOTSUP)
error = SET_ERROR(EBUSY);
dmu_tx_commit(tx);
if (error == 0)
zv->zv_volblocksize = volblocksize;
}
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
return (SET_ERROR(error));
}
/*
* Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
* implement DKIOCFREE/free-long-range.
*/
static int
zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_truncate_t *lr = arg2;
uint64_t offset, length;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
dmu_tx_mark_netfree(tx);
int error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset,
length);
}
return (error);
}
/*
* Replay a TX_WRITE ZIL transaction that didn't get committed
* after a system failure.
*/
static int
zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
zvol_state_t *zv = arg1;
lr_write_t *lr = arg2;
objset_t *os = zv->zv_objset;
char *data = (char *)(lr + 1); /* data follows lr_write_t */
uint64_t offset, length;
dmu_tx_t *tx;
int error;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
zil_replaying(zv->zv_zilog, tx);
dmu_tx_commit(tx);
}
return (error);
}
static int
zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
{
return (SET_ERROR(ENOTSUP));
}
/*
* Callback vectors for replaying records.
* Only TX_WRITE and TX_TRUNCATE are needed for zvol.
*/
zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
zvol_replay_err, /* no such transaction type */
zvol_replay_err, /* TX_CREATE */
zvol_replay_err, /* TX_MKDIR */
zvol_replay_err, /* TX_MKXATTR */
zvol_replay_err, /* TX_SYMLINK */
zvol_replay_err, /* TX_REMOVE */
zvol_replay_err, /* TX_RMDIR */
zvol_replay_err, /* TX_LINK */
zvol_replay_err, /* TX_RENAME */
zvol_replay_write, /* TX_WRITE */
zvol_replay_truncate, /* TX_TRUNCATE */
zvol_replay_err, /* TX_SETATTR */
zvol_replay_err, /* TX_ACL */
zvol_replay_err, /* TX_CREATE_ATTR */
zvol_replay_err, /* TX_CREATE_ACL_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL */
zvol_replay_err, /* TX_MKDIR_ATTR */
zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
zvol_replay_err, /* TX_WRITE2 */
};
/*
* zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
*
* We store data in the log buffers if it's small enough.
* Otherwise we will later flush the data out via dmu_sync().
*/
ssize_t zvol_immediate_write_sz = 32768;
void
zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, uint64_t offset,
uint64_t size, int sync)
{
uint32_t blocksize = zv->zv_volblocksize;
zilog_t *zilog = zv->zv_zilog;
itx_wr_state_t write_state;
uint64_t sz = size;
if (zil_replaying(zilog, tx))
return;
if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
write_state = WR_INDIRECT;
else if (!spa_has_slogs(zilog->zl_spa) &&
size >= blocksize && blocksize > zvol_immediate_write_sz)
write_state = WR_INDIRECT;
else if (sync)
write_state = WR_COPIED;
else
write_state = WR_NEED_COPY;
while (size) {
itx_t *itx;
lr_write_t *lr;
itx_wr_state_t wr_state = write_state;
ssize_t len = size;
if (wr_state == WR_COPIED && size > zil_max_copied_data(zilog))
wr_state = WR_NEED_COPY;
else if (wr_state == WR_INDIRECT)
len = MIN(blocksize - P2PHASE(offset, blocksize), size);
itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
(wr_state == WR_COPIED ? len : 0));
lr = (lr_write_t *)&itx->itx_lr;
if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
offset, len, lr+1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
lr = (lr_write_t *)&itx->itx_lr;
wr_state = WR_NEED_COPY;
}
itx->itx_wr_state = wr_state;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = offset;
lr->lr_length = len;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
itx->itx_private = zv;
itx->itx_sync = sync;
(void) zil_itx_assign(zilog, itx, tx);
offset += len;
size -= len;
}
if (write_state == WR_COPIED || write_state == WR_NEED_COPY) {
dsl_pool_wrlog_count(zilog->zl_dmu_pool, sz, tx->tx_txg);
}
}
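/*
 * A sketch of how the write_state selection above plays out with the
 * default logbias, the default zvol_immediate_write_sz of 32768, and no
 * separate log device: a synchronous, block-aligned 128 KiB write to a
 * zvol with a 128 KiB volblocksize is logged WR_INDIRECT (the data is
 * synced via dmu_sync() and only a block pointer lands in the log), a
 * synchronous 4 KiB write is logged WR_COPIED (the data is embedded in
 * the log record), and the same 4 KiB write without sync becomes
 * WR_NEED_COPY.
 */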
/*
* Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
*/
void
zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
boolean_t sync)
{
itx_t *itx;
lr_truncate_t *lr;
zilog_t *zilog = zv->zv_zilog;
if (zil_replaying(zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
lr = (lr_truncate_t *)&itx->itx_lr;
lr->lr_foid = ZVOL_OBJ;
lr->lr_offset = off;
lr->lr_length = len;
itx->itx_sync = sync;
zil_itx_assign(zilog, itx, tx);
}
/* ARGSUSED */
static void
zvol_get_done(zgd_t *zgd, int error)
{
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_rangelock_exit(zgd->zgd_lr);
kmem_free(zgd, sizeof (zgd_t));
}
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zvol_get_data(void *arg, uint64_t arg2, lr_write_t *lr, char *buf,
struct lwb *lwb, zio_t *zio)
{
zvol_state_t *zv = arg;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
DMU_READ_NO_PREFETCH);
} else { /* indirect write */
/*
* Have to lock the whole block to ensure when it's written out
* and its checksum is being calculated that no one can change
* the data. Contrary to zfs_get_data, we need not re-check
* blocksize after we get the lock because it cannot be changed.
*/
size = zv->zv_volblocksize;
offset = P2ALIGN_TYPED(offset, size, uint64_t);
zgd->zgd_lr = zfs_rangelock_enter(&zv->zv_rangelock, offset,
size, RL_READER);
error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db != NULL);
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zvol_get_done, zgd);
if (error == 0)
return (0);
}
}
zvol_get_done(zgd, error);
return (SET_ERROR(error));
}
/*
* The zvol_state_t's are inserted into zvol_state_list and zvol_htable.
*/
void
zvol_insert(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_insert_head(&zvol_state_list, zv);
hlist_add_head(&zv->zv_hlink, ZVOL_HT_HEAD(zv->zv_hash));
}
/*
* Simply remove the zvol from the list of zvols.
*/
static void
zvol_remove(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zvol_state_lock));
list_remove(&zvol_state_list, zv);
hlist_del(&zv->zv_hlink);
}
/*
* Set up zv after we have just taken ownership of zv->zv_objset
*/
static int
zvol_setup_zv(zvol_state_t *zv)
{
uint64_t volsize;
int error;
uint64_t ro;
objset_t *os = zv->zv_objset;
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_LOCK_HELD(&zv->zv_suspend_lock));
zv->zv_zilog = NULL;
zv->zv_flags &= ~ZVOL_WRITTEN_TO;
error = dsl_prop_get_integer(zv->zv_name, "readonly", &ro, NULL);
if (error)
return (SET_ERROR(error));
error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
if (error)
return (SET_ERROR(error));
error = dnode_hold(os, ZVOL_OBJ, zv, &zv->zv_dn);
if (error)
return (SET_ERROR(error));
ops->zv_set_capacity(zv, volsize >> 9);
zv->zv_volsize = volsize;
if (ro || dmu_objset_is_snapshot(os) ||
!spa_writeable(dmu_objset_spa(os))) {
ops->zv_set_disk_ro(zv, 1);
zv->zv_flags |= ZVOL_RDONLY;
} else {
ops->zv_set_disk_ro(zv, 0);
zv->zv_flags &= ~ZVOL_RDONLY;
}
return (0);
}
/*
* Shut down everything related to zv_objset except zv_objset itself.
* This is the reverse of zvol_setup_zv.
*/
static void
zvol_shutdown_zv(zvol_state_t *zv)
{
ASSERT(MUTEX_HELD(&zv->zv_state_lock) &&
RW_LOCK_HELD(&zv->zv_suspend_lock));
if (zv->zv_flags & ZVOL_WRITTEN_TO) {
ASSERT(zv->zv_zilog != NULL);
zil_close(zv->zv_zilog);
}
zv->zv_zilog = NULL;
dnode_rele(zv->zv_dn, zv);
zv->zv_dn = NULL;
/*
* Evict cached data. We must write out any dirty data before
* disowning the dataset.
*/
if (zv->zv_flags & ZVOL_WRITTEN_TO)
txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
(void) dmu_objset_evict_dbufs(zv->zv_objset);
}
/*
* Return the proper tag for rollback and recv.
*/
void *
zvol_tag(zvol_state_t *zv)
{
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
return (zv->zv_open_count > 0 ? zv : NULL);
}
/*
* Suspend the zvol for recv and rollback.
*/
zvol_state_t *
zvol_suspend(const char *name)
{
zvol_state_t *zv;
zv = zvol_find_by_name(name, RW_WRITER);
if (zv == NULL)
return (NULL);
/* block all I/O, release in zvol_resume. */
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
atomic_inc(&zv->zv_suspend_ref);
if (zv->zv_open_count > 0)
zvol_shutdown_zv(zv);
/*
* do not hold zv_state_lock across suspend/resume to
* avoid locking up zvol lookups
*/
mutex_exit(&zv->zv_state_lock);
/* zv_suspend_lock is released in zvol_resume() */
return (zv);
}
int
zvol_resume(zvol_state_t *zv)
{
int error = 0;
ASSERT(RW_WRITE_HELD(&zv->zv_suspend_lock));
mutex_enter(&zv->zv_state_lock);
if (zv->zv_open_count > 0) {
VERIFY0(dmu_objset_hold(zv->zv_name, zv, &zv->zv_objset));
VERIFY3P(zv->zv_objset->os_dsl_dataset->ds_owner, ==, zv);
VERIFY(dsl_dataset_long_held(zv->zv_objset->os_dsl_dataset));
dmu_objset_rele(zv->zv_objset, zv);
error = zvol_setup_zv(zv);
}
mutex_exit(&zv->zv_state_lock);
rw_exit(&zv->zv_suspend_lock);
/*
* We need this because we don't hold zvol_state_lock while releasing
* zv_suspend_lock. zvol_remove_minors_impl thus cannot check
* zv_suspend_lock to determine whether it is safe to free, because an
* rwlock is not inherently atomic.
*/
atomic_dec(&zv->zv_suspend_ref);
return (SET_ERROR(error));
}
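/*
 * A minimal usage sketch of the suspend/resume pair above, as used around
 * receive and rollback (the dataset name is only an example):
 *
 *	zvol_state_t *zv = zvol_suspend("pool/vol");
 *	... perform the receive or rollback while zvol I/O is blocked ...
 *	if (zv != NULL)
 *		error = zvol_resume(zv);
 */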
int
zvol_first_open(zvol_state_t *zv, boolean_t readonly)
{
objset_t *os;
int error, locked = 0;
boolean_t ro;
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
/*
* In all other cases the spa_namespace_lock is taken before the
* bdev->bd_mutex lock. But in this case the Linux __blkdev_get()
* function calls fops->open() with the bdev->bd_mutex lock held.
* This deadlock can be easily observed with zvols used as vdevs.
*
* To avoid a potential lock inversion deadlock we preemptively
* try to take the spa_namespace_lock(). Normally it will not
* be contended and this is safe because spa_open_common() handles
* the case where the caller already holds the spa_namespace_lock.
*
* When it is contended we risk a lock inversion if we were to
* block waiting for the lock. Luckily, the __blkdev_get()
* function allows us to return -ERESTARTSYS which will result in
* bdev->bd_mutex being dropped, reacquired, and fops->open() being
* called again. This process can be repeated safely until both
* locks are acquired.
*/
if (!mutex_owned(&spa_namespace_lock)) {
locked = mutex_tryenter(&spa_namespace_lock);
if (!locked)
return (SET_ERROR(EINTR));
}
ro = (readonly || (strchr(zv->zv_name, '@') != NULL));
error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, ro, B_TRUE, zv, &os);
if (error)
goto out_mutex;
zv->zv_objset = os;
error = zvol_setup_zv(zv);
if (error) {
dmu_objset_disown(os, 1, zv);
zv->zv_objset = NULL;
}
out_mutex:
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(error));
}
void
zvol_last_close(zvol_state_t *zv)
{
ASSERT(RW_READ_HELD(&zv->zv_suspend_lock));
ASSERT(MUTEX_HELD(&zv->zv_state_lock));
zvol_shutdown_zv(zv);
dmu_objset_disown(zv->zv_objset, 1, zv);
zv->zv_objset = NULL;
}
typedef struct minors_job {
list_t *list;
list_node_t link;
/* input */
char *name;
/* output */
int error;
} minors_job_t;
/*
* Prefetch zvol dnodes for the minors_job
*/
static void
zvol_prefetch_minors_impl(void *arg)
{
minors_job_t *job = arg;
char *dsname = job->name;
objset_t *os = NULL;
job->error = dmu_objset_own(dsname, DMU_OST_ZVOL, B_TRUE, B_TRUE,
FTAG, &os);
if (job->error == 0) {
dmu_prefetch(os, ZVOL_OBJ, 0, 0, 0, ZIO_PRIORITY_SYNC_READ);
dmu_objset_disown(os, B_TRUE, FTAG);
}
}
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_snap_minor_cb(const char *dsname, void *arg)
{
minors_job_t *j = arg;
list_t *minors_list = j->list;
const char *name = j->name;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
/* skip the designated dataset */
if (name && strcmp(dsname, name) == 0)
return (0);
/* at this point, the dsname should name a snapshot */
if (strchr(dsname, '@') == 0) {
dprintf("zvol_create_snap_minor_cb(): "
"%s is not a snapshot name\n", dsname);
} else {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
}
return (0);
}
+/*
+ * If spa_keystore_load_wkey() is called for an encrypted zvol,
+ * we need to look for any clones also using the key. This function
+ * is "best effort" - so we just skip over it if there are failures.
+ */
+static void
+zvol_add_clones(const char *dsname, list_t *minors_list)
+{
+ /* Also check if it has clones */
+ dsl_dir_t *dd = NULL;
+ dsl_pool_t *dp = NULL;
+
+ if (dsl_pool_hold(dsname, FTAG, &dp) != 0)
+ return;
+
+ if (!spa_feature_is_enabled(dp->dp_spa,
+ SPA_FEATURE_ENCRYPTION))
+ goto out;
+
+ if (dsl_dir_hold(dp, dsname, FTAG, &dd, NULL) != 0)
+ goto out;
+
+ if (dsl_dir_phys(dd)->dd_clones == 0)
+ goto out;
+
+ zap_cursor_t *zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
+ zap_attribute_t *za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
+ objset_t *mos = dd->dd_pool->dp_meta_objset;
+
+ for (zap_cursor_init(zc, mos, dsl_dir_phys(dd)->dd_clones);
+ zap_cursor_retrieve(zc, za) == 0;
+ zap_cursor_advance(zc)) {
+ dsl_dataset_t *clone;
+ minors_job_t *job;
+
+ if (dsl_dataset_hold_obj(dd->dd_pool,
+ za->za_first_integer, FTAG, &clone) == 0) {
+
+ char name[ZFS_MAX_DATASET_NAME_LEN];
+ dsl_dataset_name(clone, name);
+
+ char *n = kmem_strdup(name);
+ job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
+ job->name = n;
+ job->list = minors_list;
+ job->error = 0;
+ list_insert_tail(minors_list, job);
+
+ dsl_dataset_rele(clone, FTAG);
+ }
+ }
+ zap_cursor_fini(zc);
+ kmem_free(za, sizeof (zap_attribute_t));
+ kmem_free(zc, sizeof (zap_cursor_t));
+
+out:
+ if (dd != NULL)
+ dsl_dir_rele(dd, FTAG);
+ if (dp != NULL)
+ dsl_pool_rele(dp, FTAG);
+}
+
/*
* Mask errors to continue dmu_objset_find() traversal
*/
static int
zvol_create_minors_cb(const char *dsname, void *arg)
{
uint64_t snapdev;
int error;
list_t *minors_list = arg;
ASSERT0(MUTEX_HELD(&spa_namespace_lock));
error = dsl_prop_get_integer(dsname, "snapdev", &snapdev, NULL);
if (error)
return (0);
/*
* Given the name and the 'snapdev' property, create device minor nodes
* with the linkages to zvols/snapshots as needed.
* If the name represents a zvol, create a minor node for the zvol, then
* check if its snapshots are 'visible', and if so, iterate over the
* snapshots and create device minor nodes for those.
*/
if (strchr(dsname, '@') == 0) {
minors_job_t *job;
char *n = kmem_strdup(dsname);
if (n == NULL)
return (0);
job = kmem_alloc(sizeof (minors_job_t), KM_SLEEP);
job->name = n;
job->list = minors_list;
job->error = 0;
list_insert_tail(minors_list, job);
/* don't care if dispatch fails, because job->error is 0 */
taskq_dispatch(system_taskq, zvol_prefetch_minors_impl, job,
TQ_SLEEP);
+ zvol_add_clones(dsname, minors_list);
+
if (snapdev == ZFS_SNAPDEV_VISIBLE) {
/*
* traverse snapshots only, do not traverse children,
* and skip the 'dsname'
*/
error = dmu_objset_find(dsname,
zvol_create_snap_minor_cb, (void *)job,
DS_FIND_SNAPSHOTS);
}
} else {
dprintf("zvol_create_minors_cb(): %s is not a zvol name\n",
dsname);
}
return (0);
}
/*
* Create minors for the specified dataset, including children and snapshots.
* Pay attention to the 'snapdev' property and iterate over the snapshots
* only if they are 'visible'. This approach allows one to ensure that the
* snapshot metadata is read from disk only if it is needed.
*
* The name can represent a dataset to be recursively scanned for zvols and
* their snapshots, or a single zvol snapshot. If the name represents a
* dataset, the scan is performed in two nested stages:
* - scan the dataset for zvols, and
* - for each zvol, create a minor node, then check if the zvol's snapshots
* are 'visible', and only then iterate over the snapshots if needed
*
* If the name represents a snapshot, a check is performed if the snapshot is
* 'visible' (which also verifies that the parent is a zvol), and if so,
* a minor node for that snapshot is created.
*/
void
zvol_create_minors_recursive(const char *name)
{
list_t minors_list;
minors_job_t *job;
if (zvol_inhibit_dev)
return;
/*
* This is the list for prefetch jobs. Whenever we find a match during
* dmu_objset_find, we insert a minors_job into the list and use
* taskq_dispatch to prefetch zvol dnodes in parallel. Note we don't need
* any lock because all list operations are done on the current thread.
*
* We will use this list to do zvol_create_minor_impl after prefetch
* so we don't have to traverse using dmu_objset_find again.
*/
list_create(&minors_list, sizeof (minors_job_t),
offsetof(minors_job_t, link));
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name, "snapdev",
&snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) ops->zv_create_minor(name);
} else {
fstrans_cookie_t cookie = spl_fstrans_mark();
(void) dmu_objset_find(name, zvol_create_minors_cb,
&minors_list, DS_FIND_CHILDREN);
spl_fstrans_unmark(cookie);
}
taskq_wait_outstanding(system_taskq, 0);
/*
* Prefetch is complete, so we can now do zvol_create_minor_impl
* sequentially.
*/
while ((job = list_head(&minors_list)) != NULL) {
list_remove(&minors_list, job);
if (!job->error)
(void) ops->zv_create_minor(job->name);
kmem_strfree(job->name);
kmem_free(job, sizeof (minors_job_t));
}
list_destroy(&minors_list);
}
void
zvol_create_minor(const char *name)
{
/*
* Note: the dsl_pool_config_lock must not be held.
* Minor node creation needs to obtain the zvol_state_lock.
* zvol_open() obtains the zvol_state_lock and then the dsl pool
* config lock. Therefore, we can't have the config lock now if
* we are going to wait for the zvol_state_lock, because it
* would be a lock order inversion which could lead to deadlock.
*/
if (zvol_inhibit_dev)
return;
if (strchr(name, '@') != NULL) {
uint64_t snapdev;
int error = dsl_prop_get_integer(name,
"snapdev", &snapdev, NULL);
if (error == 0 && snapdev == ZFS_SNAPDEV_VISIBLE)
(void) ops->zv_create_minor(name);
} else {
(void) ops->zv_create_minor(name);
}
}
/*
* Remove minors for specified dataset including children and snapshots.
*/
static void
zvol_free_task(void *arg)
{
ops->zv_free(arg);
}
void
zvol_remove_minors_impl(const char *name)
{
zvol_state_t *zv, *zv_next;
int namelen = ((name) ? strlen(name) : 0);
taskqid_t t;
list_t free_list;
if (zvol_inhibit_dev)
return;
list_create(&free_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (name == NULL || strcmp(zv->zv_name, name) == 0 ||
(strncmp(zv->zv_name, name, namelen) == 0 &&
(zv->zv_name[namelen] == '/' ||
zv->zv_name[namelen] == '@'))) {
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
/*
* Cleared while holding zvol_state_lock as a writer
* which will prevent zvol_open() from opening it.
*/
ops->zv_clear_private(zv);
/* Drop zv_state_lock before zvol_free() */
mutex_exit(&zv->zv_state_lock);
/* Try parallel zv_free, if failed do it in place */
t = taskq_dispatch(system_taskq, zvol_free_task, zv,
TQ_SLEEP);
if (t == TASKQID_INVALID)
list_insert_head(&free_list, zv);
} else {
mutex_exit(&zv->zv_state_lock);
}
}
rw_exit(&zvol_state_lock);
/* Drop zvol_state_lock before calling zvol_free() */
while ((zv = list_head(&free_list)) != NULL) {
list_remove(&free_list, zv);
ops->zv_free(zv);
}
}
/* Remove minor for this specific volume only */
static void
zvol_remove_minor_impl(const char *name)
{
zvol_state_t *zv = NULL, *zv_next;
if (zvol_inhibit_dev)
return;
rw_enter(&zvol_state_lock, RW_WRITER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, name) == 0) {
/*
* By holding zv_state_lock here, we guarantee that no
* one is currently using this zv
*/
/* If in use, leave alone */
if (zv->zv_open_count > 0 ||
atomic_read(&zv->zv_suspend_ref)) {
mutex_exit(&zv->zv_state_lock);
continue;
}
zvol_remove(zv);
ops->zv_clear_private(zv);
mutex_exit(&zv->zv_state_lock);
break;
} else {
mutex_exit(&zv->zv_state_lock);
}
}
/* Drop zvol_state_lock before calling zvol_free() */
rw_exit(&zvol_state_lock);
if (zv != NULL)
ops->zv_free(zv);
}
/*
* Rename minors for specified dataset including children and snapshots.
*/
static void
zvol_rename_minors_impl(const char *oldname, const char *newname)
{
zvol_state_t *zv, *zv_next;
int oldnamelen, newnamelen;
if (zvol_inhibit_dev)
return;
oldnamelen = strlen(oldname);
newnamelen = strlen(newname);
rw_enter(&zvol_state_lock, RW_READER);
for (zv = list_head(&zvol_state_list); zv != NULL; zv = zv_next) {
zv_next = list_next(&zvol_state_list, zv);
mutex_enter(&zv->zv_state_lock);
if (strcmp(zv->zv_name, oldname) == 0) {
ops->zv_rename_minor(zv, newname);
} else if (strncmp(zv->zv_name, oldname, oldnamelen) == 0 &&
(zv->zv_name[oldnamelen] == '/' ||
zv->zv_name[oldnamelen] == '@')) {
char *name = kmem_asprintf("%s%c%s", newname,
zv->zv_name[oldnamelen],
zv->zv_name + oldnamelen + 1);
ops->zv_rename_minor(zv, name);
kmem_strfree(name);
}
mutex_exit(&zv->zv_state_lock);
}
rw_exit(&zvol_state_lock);
}
typedef struct zvol_snapdev_cb_arg {
uint64_t snapdev;
} zvol_snapdev_cb_arg_t;
static int
zvol_set_snapdev_cb(const char *dsname, void *param)
{
zvol_snapdev_cb_arg_t *arg = param;
if (strchr(dsname, '@') == NULL)
return (0);
switch (arg->snapdev) {
case ZFS_SNAPDEV_VISIBLE:
(void) ops->zv_create_minor(dsname);
break;
case ZFS_SNAPDEV_HIDDEN:
(void) zvol_remove_minor_impl(dsname);
break;
}
return (0);
}
static void
zvol_set_snapdev_impl(char *name, uint64_t snapdev)
{
zvol_snapdev_cb_arg_t arg = {snapdev};
fstrans_cookie_t cookie = spl_fstrans_mark();
/*
* The zvol_set_snapdev_sync() sets snapdev appropriately
* in the dataset hierarchy. Here, we only scan snapshots.
*/
dmu_objset_find(name, zvol_set_snapdev_cb, &arg, DS_FIND_SNAPSHOTS);
spl_fstrans_unmark(cookie);
}
static void
zvol_set_volmode_impl(char *name, uint64_t volmode)
{
fstrans_cookie_t cookie;
uint64_t old_volmode;
zvol_state_t *zv;
if (strchr(name, '@') != NULL)
return;
/*
* It's unfortunate we need to remove minors before we create new ones:
* this is necessary because our backing gendisk (zvol_state->zv_disk)
* could be different when we set, for instance, volmode from "geom"
* to "dev" (or vice versa).
*/
zv = zvol_find_by_name(name, RW_NONE);
if (zv == NULL && volmode == ZFS_VOLMODE_NONE)
return;
if (zv != NULL) {
old_volmode = zv->zv_volmode;
mutex_exit(&zv->zv_state_lock);
if (old_volmode == volmode)
return;
zvol_wait_close(zv);
}
cookie = spl_fstrans_mark();
switch (volmode) {
case ZFS_VOLMODE_NONE:
(void) zvol_remove_minor_impl(name);
break;
case ZFS_VOLMODE_GEOM:
case ZFS_VOLMODE_DEV:
(void) zvol_remove_minor_impl(name);
(void) ops->zv_create_minor(name);
break;
case ZFS_VOLMODE_DEFAULT:
(void) zvol_remove_minor_impl(name);
if (zvol_volmode == ZFS_VOLMODE_NONE)
break;
else /* if zvol_volmode is invalid, default to "geom" */
(void) ops->zv_create_minor(name);
break;
}
spl_fstrans_unmark(cookie);
}
static zvol_task_t *
zvol_task_alloc(zvol_async_op_t op, const char *name1, const char *name2,
uint64_t value)
{
zvol_task_t *task;
/* Never allow tasks on hidden names. */
if (name1[0] == '$')
return (NULL);
task = kmem_zalloc(sizeof (zvol_task_t), KM_SLEEP);
task->op = op;
task->value = value;
strlcpy(task->name1, name1, MAXNAMELEN);
if (name2 != NULL)
strlcpy(task->name2, name2, MAXNAMELEN);
return (task);
}
static void
zvol_task_free(zvol_task_t *task)
{
kmem_free(task, sizeof (zvol_task_t));
}
/*
* The worker thread function, performed asynchronously.
*/
static void
zvol_task_cb(void *arg)
{
zvol_task_t *task = arg;
switch (task->op) {
case ZVOL_ASYNC_REMOVE_MINORS:
zvol_remove_minors_impl(task->name1);
break;
case ZVOL_ASYNC_RENAME_MINORS:
zvol_rename_minors_impl(task->name1, task->name2);
break;
case ZVOL_ASYNC_SET_SNAPDEV:
zvol_set_snapdev_impl(task->name1, task->value);
break;
case ZVOL_ASYNC_SET_VOLMODE:
zvol_set_volmode_impl(task->name1, task->value);
break;
default:
VERIFY(0);
break;
}
zvol_task_free(task);
}
typedef struct zvol_set_prop_int_arg {
const char *zsda_name;
uint64_t zsda_value;
zprop_source_t zsda_source;
dmu_tx_t *zsda_tx;
} zvol_set_prop_int_arg_t;
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_snapdev_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
/* ARGSUSED */
static int
zvol_set_snapdev_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t snapdev;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "snapdev", &snapdev) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_SNAPDEV, dsname, NULL, snapdev);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply snapdev appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "snapdev" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_snapdev_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_SNAPDEV),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_snapdev_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
int
zvol_set_snapdev(const char *ddname, zprop_source_t source, uint64_t snapdev)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = snapdev;
return (dsl_sync_task(ddname, zvol_set_snapdev_check,
zvol_set_snapdev_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
/*
* Sanity check the dataset for safe use by the sync task. No additional
* conditions are imposed.
*/
static int
zvol_set_volmode_check(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
int error;
error = dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL);
if (error != 0)
return (error);
dsl_dir_rele(dd, FTAG);
return (error);
}
/* ARGSUSED */
static int
zvol_set_volmode_sync_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
char dsname[MAXNAMELEN];
zvol_task_t *task;
uint64_t volmode;
dsl_dataset_name(ds, dsname);
if (dsl_prop_get_int_ds(ds, "volmode", &volmode) != 0)
return (0);
task = zvol_task_alloc(ZVOL_ASYNC_SET_VOLMODE, dsname, NULL, volmode);
if (task == NULL)
return (0);
(void) taskq_dispatch(dp->dp_spa->spa_zvol_taskq, zvol_task_cb,
task, TQ_SLEEP);
return (0);
}
/*
* Traverse all child datasets and apply volmode appropriately.
* We call dsl_prop_set_sync_impl() here to set the value only on the toplevel
* dataset and read the effective "volmode" on every child in the callback
* function: this is because the value is not guaranteed to be the same in the
* whole dataset hierarchy.
*/
static void
zvol_set_volmode_sync(void *arg, dmu_tx_t *tx)
{
zvol_set_prop_int_arg_t *zsda = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd;
dsl_dataset_t *ds;
int error;
VERIFY0(dsl_dir_hold(dp, zsda->zsda_name, FTAG, &dd, NULL));
zsda->zsda_tx = tx;
error = dsl_dataset_hold(dp, zsda->zsda_name, FTAG, &ds);
if (error == 0) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_VOLMODE),
zsda->zsda_source, sizeof (zsda->zsda_value), 1,
&zsda->zsda_value, zsda->zsda_tx);
dsl_dataset_rele(ds, FTAG);
}
dmu_objset_find_dp(dp, dd->dd_object, zvol_set_volmode_sync_cb,
zsda, DS_FIND_CHILDREN);
dsl_dir_rele(dd, FTAG);
}
int
zvol_set_volmode(const char *ddname, zprop_source_t source, uint64_t volmode)
{
zvol_set_prop_int_arg_t zsda;
zsda.zsda_name = ddname;
zsda.zsda_source = source;
zsda.zsda_value = volmode;
return (dsl_sync_task(ddname, zvol_set_volmode_check,
zvol_set_volmode_sync, &zsda, 0, ZFS_SPACE_CHECK_NONE));
}
void
zvol_remove_minors(spa_t *spa, const char *name, boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_REMOVE_MINORS, name, NULL, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
void
zvol_rename_minors(spa_t *spa, const char *name1, const char *name2,
boolean_t async)
{
zvol_task_t *task;
taskqid_t id;
task = zvol_task_alloc(ZVOL_ASYNC_RENAME_MINORS, name1, name2, ~0ULL);
if (task == NULL)
return;
id = taskq_dispatch(spa->spa_zvol_taskq, zvol_task_cb, task, TQ_SLEEP);
if ((async == B_FALSE) && (id != TASKQID_INVALID))
taskq_wait_id(spa->spa_zvol_taskq, id);
}
boolean_t
zvol_is_zvol(const char *name)
{
return (ops->zv_is_zvol(name));
}
void
zvol_register_ops(const zvol_platform_ops_t *zvol_ops)
{
ops = zvol_ops;
}
int
zvol_init_impl(void)
{
int i;
list_create(&zvol_state_list, sizeof (zvol_state_t),
offsetof(zvol_state_t, zv_next));
rw_init(&zvol_state_lock, NULL, RW_DEFAULT, NULL);
zvol_htable = kmem_alloc(ZVOL_HT_SIZE * sizeof (struct hlist_head),
KM_SLEEP);
for (i = 0; i < ZVOL_HT_SIZE; i++)
INIT_HLIST_HEAD(&zvol_htable[i]);
return (0);
}
void
zvol_fini_impl(void)
{
zvol_remove_minors_impl(NULL);
/*
* The call to "zvol_remove_minors_impl" may dispatch entries to
* the system_taskq, but it doesn't wait for those entries to
* complete before it returns. Thus, we must wait for all of the
* removals to finish, before we can continue.
*/
taskq_wait_outstanding(system_taskq, 0);
kmem_free(zvol_htable, ZVOL_HT_SIZE * sizeof (struct hlist_head));
list_destroy(&zvol_state_list);
rw_destroy(&zvol_state_lock);
}
diff --git a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
index cf56e747e5c5..1acb8b9c1be2 100755
--- a/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
+++ b/sys/contrib/openzfs/tests/test-runner/bin/zts-report.py.in
@@ -1,462 +1,455 @@
#!/usr/bin/env @PYTHON_SHEBANG@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
#
# Copyright (c) 2017 by Delphix. All rights reserved.
# Copyright (c) 2018 by Lawrence Livermore National Security, LLC.
#
# This script must remain compatible with Python 2.6+ and Python 3.4+.
#
import os
import re
import sys
#
# This script parses the stdout of zfstest, which has this format:
#
# Test: /path/to/testa (run as root) [00:00] [PASS]
# Test: /path/to/testb (run as jkennedy) [00:00] [PASS]
# Test: /path/to/testc (run as root) [00:00] [FAIL]
# [...many more results...]
#
# Results Summary
# FAIL 22
# SKIP 32
# PASS 1156
#
# Running Time: 02:50:31
# Percent passed: 95.5%
# Log directory: /var/tmp/test_results/20180615T205926
#
#
# Common generic reasons for a test or test group to be skipped.
#
# Some test cases are known to fail in ways which are not harmful or dangerous.
# In these cases simply mark the test as a known failure until it can be
# updated and the issue resolved. Note that it's preferable to open a unique
# issue on the GitHub issue tracker for each test case failure.
#
known_reason = 'Known issue'
#
# Some tests require that a test user be able to execute the zfs utilities.
# This may not be possible when testing in-tree due to the default permissions
# on the user's home directory. When testing, this can be resolved by granting
# group read access.
#
# chmod 0750 $HOME
#
exec_reason = 'Test user execute permissions required for utilities'
#
# Some tests require a minimum python version of 3.5 and will be skipped when
# the default system version is too old. There may also be tests which require
# additional python modules be installed; for example, python-cffi is required
# by the pyzfs tests.
#
python_reason = 'Python v3.5 or newer required'
python_deps_reason = 'Python modules missing: python-cffi'
#
# Some tests require the O_TMPFILE flag which was first introduced in the
# 3.11 kernel.
#
tmpfile_reason = 'Kernel O_TMPFILE support required'
#
# Some tests require the statx(2) system call on Linux which was first
# introduced in the 4.11 kernel.
#
statx_reason = 'Kernel statx(2) system call required on Linux'
#
# Some tests require that the NFS client and server utilities be installed.
#
share_reason = 'NFS client and server utilities required'
#
# Some tests require that the lsattr utility support the project id feature.
#
project_id_reason = 'lsattr with set/show project ID required'
#
# Some tests require that the kernel support user namespaces.
#
user_ns_reason = 'Kernel user namespace support required'
#
# Some rewind tests can fail since nothing guarantees that old MOS blocks
# are not overwritten. Snapshots protect datasets and data files but not
# the MOS. Reasonable efforts are made in the test case to increase the
# odds that some txgs will have their MOS data left untouched, but it is
# never a sure thing.
#
rewind_reason = 'Arbitrary pool rewind is not guaranteed'
#
# Some tests may be structured in a way that relies on exact knowledge
# of how much free space is available in a pool. These tests cannot be
# made completely reliable because the internal details of how free space
# is managed are not exposed to user space.
#
enospc_reason = 'Exact free space reporting is not guaranteed'
#
# Some tests require a minimum version of the fio benchmark utility.
# Older distributions such as CentOS 6.x only provide fio-2.0.13.
#
fio_reason = 'Fio v2.3 or newer required'
#
# Some tests require that the DISKS provided support the discard operation.
# Normally this is not an issue because loopback devices are used for DISKS
# and they support discard (TRIM/UNMAP).
#
trim_reason = 'DISKS must support discard (TRIM/UNMAP)'
#
# Some tests on FreeBSD require the fspacectl(2) system call and the
# truncate(1) utility supporting the -d option. The system call was first
# introduced in FreeBSD version 1400032.
#
fspacectl_reason = 'fspacectl(2) and truncate -d support required'
#
# Some tests are not applicable to a platform or need to be updated to operate
# in the manner required by the platform. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "Not applicable"
#
# Some test cases don't have all the requirements to run on GitHub Actions CI.
#
ci_reason = 'CI runner doesn\'t have all requirements'
summary = {
'total': float(0),
'passed': float(0),
'logfile': "Could not determine logfile location."
}
#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternatively, one of the generic
# reasons listed above can be used.
#
known = {
'casenorm/mixed_none_lookup_ci': ['FAIL', '7633'],
'casenorm/mixed_formd_lookup_ci': ['FAIL', '7633'],
'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason],
'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
'privilege/setup': ['SKIP', na_reason],
'refreserv/refreserv_004_pos': ['FAIL', known_reason],
'rootpool/setup': ['SKIP', na_reason],
'rsend/rsend_008_pos': ['SKIP', '6066'],
'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
}
if sys.platform.startswith('freebsd'):
known.update({
'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason],
'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason],
'link_count/link_count_001': ['SKIP', na_reason],
})
elif sys.platform.startswith('linux'):
known.update({
'casenorm/mixed_formd_lookup': ['FAIL', '7633'],
'casenorm/mixed_formd_delete': ['FAIL', '7633'],
'casenorm/sensitive_formd_lookup': ['FAIL', '7633'],
'casenorm/sensitive_formd_delete': ['FAIL', '7633'],
'removal/removal_with_zdb': ['SKIP', known_reason],
})
#
# These tests may occasionally fail or be skipped. We want these failures
# to be reported, but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternatively, one of the generic
# reasons listed above can be used.
#
maybe = {
'chattr/setup': ['SKIP', exec_reason],
'crtime/crtime_001_pos': ['SKIP', statx_reason],
'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason],
'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense':
['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason],
'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', '5479'],
'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason],
'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason],
'cli_root/zfs_share/setup': ['SKIP', share_reason],
'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason],
'cli_root/zfs_unshare/setup': ['SKIP', share_reason],
'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason],
'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', '6145'],
'cli_root/zpool_import/import_rewind_device_replaced':
['FAIL', rewind_reason],
'cli_root/zpool_import/import_rewind_config_changed':
['FAIL', rewind_reason],
'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', '6839'],
'cli_root/zpool_initialize/zpool_initialize_import_export':
['FAIL', '11948'],
'cli_root/zpool_labelclear/zpool_labelclear_removed':
['FAIL', known_reason],
'cli_root/zpool_trim/setup': ['SKIP', trim_reason],
'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', '6141'],
'delegate/setup': ['SKIP', exec_reason],
'fallocate/fallocate_punch-hole': ['SKIP', fspacectl_reason],
'history/history_004_pos': ['FAIL', '7026'],
'history/history_005_neg': ['FAIL', '6680'],
'history/history_006_neg': ['FAIL', '5657'],
'history/history_008_pos': ['FAIL', known_reason],
'history/history_010_pos': ['SKIP', exec_reason],
'io/mmap': ['SKIP', fio_reason],
'largest_pool/largest_pool_001_pos': ['FAIL', known_reason],
'mmp/mmp_on_uberblocks': ['FAIL', known_reason],
'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason],
'no_space/enospc_002_pos': ['FAIL', enospc_reason],
'pool_checkpoint/checkpoint_discard_busy': ['FAIL', '11946'],
'projectquota/setup': ['SKIP', exec_reason],
'redundancy/redundancy_004_neg': ['FAIL', '7290'],
'redundancy/redundancy_draid_spare3': ['SKIP', known_reason],
'removal/removal_condense_export': ['FAIL', known_reason],
'reservation/reservation_008_pos': ['FAIL', '7741'],
'reservation/reservation_018_pos': ['FAIL', '5642'],
'rsend/rsend_019_pos': ['FAIL', '6086'],
'rsend/rsend_020_pos': ['FAIL', '6446'],
'rsend/rsend_021_pos': ['FAIL', '6446'],
'rsend/rsend_024_pos': ['FAIL', '5665'],
'rsend/send-c_volume': ['FAIL', '6087'],
'rsend/send_partial_dataset': ['FAIL', known_reason],
'snapshot/clone_001_pos': ['FAIL', known_reason],
'snapshot/snapshot_009_pos': ['FAIL', '7961'],
'snapshot/snapshot_010_pos': ['FAIL', '7961'],
'snapused/snapused_004_pos': ['FAIL', '5513'],
'tmpfile/setup': ['SKIP', tmpfile_reason],
'threadsappend/threadsappend_001_pos': ['FAIL', '6136'],
'trim/setup': ['SKIP', trim_reason],
'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason],
'user_namespace/setup': ['SKIP', user_ns_reason],
'userquota/setup': ['SKIP', exec_reason],
'vdev_zaps/vdev_zaps_004_pos': ['FAIL', '6935'],
'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', '5848'],
'pam/setup': ['SKIP', "pamtester might not be available"],
}
if sys.platform.startswith('freebsd'):
maybe.update({
'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason],
'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason],
'cli_root/zfs_receive/receive-o-x_props_override':
['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_011_pos': ['FAIL', known_reason],
'cli_root/zfs_share/zfs_share_concurrent_shares':
['FAIL', known_reason],
'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason],
- 'cli_root/zpool_import/zpool_import_features_001_pos':
- ['FAIL', '11854'],
- 'cli_root/zpool_import/zpool_import_features_002_neg':
- ['FAIL', '11854'],
- 'cli_root/zpool_import/zpool_import_features_003_pos':
- ['FAIL', '11854'],
'delegate/zfs_allow_003_pos': ['FAIL', known_reason],
'inheritance/inherit_001_pos': ['FAIL', '11829'],
- 'pool_checkpoint/checkpoint_zhack_feat': ['FAIL', '11854'],
'resilver/resilver_restart_001': ['FAIL', known_reason],
'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
})
elif sys.platform.startswith('linux'):
maybe.update({
'alloc_class/alloc_class_009_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_010_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_011_neg': ['FAIL', known_reason],
'alloc_class/alloc_class_012_pos': ['FAIL', known_reason],
'alloc_class/alloc_class_013_pos': ['FAIL', '11888'],
'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
'cli_root/zpool_expand/zpool_expand_001_pos': ['FAIL', known_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['FAIL', known_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
'fault/auto_spare_shared': ['FAIL', '11889'],
'io/io_uring': ['SKIP', 'io_uring support required'],
'limits/filesystem_limit': ['SKIP', known_reason],
'limits/snapshot_limit': ['SKIP', known_reason],
'mmp/mmp_active_import': ['FAIL', known_reason],
'mmp/mmp_exported_import': ['FAIL', known_reason],
'mmp/mmp_inactive_import': ['FAIL', known_reason],
'refreserv/refreserv_raidz': ['FAIL', known_reason],
'rsend/rsend_007_pos': ['FAIL', known_reason],
'rsend/rsend_010_pos': ['FAIL', known_reason],
'rsend/rsend_011_pos': ['FAIL', known_reason],
'snapshot/rollback_003_pos': ['FAIL', known_reason],
})
# Not all GitHub Actions runners have the scsi_debug module, so we may skip
# some tests which use it.
if os.environ.get('CI') == 'true':
known.update({
'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason],
'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/setup': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason],
'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason],
'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason],
'fault/auto_offline_001_pos': ['SKIP', ci_reason],
'fault/auto_online_001_pos': ['SKIP', ci_reason],
'fault/auto_online_002_pos': ['SKIP', ci_reason],
'fault/auto_replace_001_pos': ['SKIP', ci_reason],
'fault/auto_spare_ashift': ['SKIP', ci_reason],
'fault/auto_spare_shared': ['SKIP', ci_reason],
'procfs/pool_state': ['SKIP', ci_reason],
})
maybe.update({
'events/events_002_pos': ['FAIL', '11546'],
})
def usage(s):
print(s)
sys.exit(1)
def process_results(pathname):
try:
f = open(pathname)
except IOError as e:
print('Error opening file: %s' % e)
sys.exit(1)
prefix = '/zfs-tests/tests/functional/'
pattern = \
r'^Test(?:\s+\(\S+\))?:' + \
r'\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]' \
% prefix
pattern_log = r'^\s*Log directory:\s*(\S*)'
d = {}
for line in f.readlines():
m = re.match(pattern, line)
if m and len(m.groups()) == 4:
summary['total'] += 1
if m.group(4) == "PASS":
summary['passed'] += 1
d[m.group(1)] = m.group(4)
continue
m = re.match(pattern_log, line)
if m:
summary['logfile'] = m.group(1)
return d
if __name__ == "__main__":
if len(sys.argv) != 2:
usage('usage: %s <pathname>' % sys.argv[0])
results = process_results(sys.argv[1])
if summary['total'] == 0:
print("\n\nNo test results were found.")
print("Log directory: %s" % summary['logfile'])
sys.exit(0)
expected = []
unexpected = []
for test in list(results.keys()):
if results[test] == "PASS":
continue
setup = test.replace(os.path.basename(test), "setup")
if results[test] == "SKIP" and test != setup:
if setup in known and known[setup][0] == "SKIP":
continue
if setup in maybe and maybe[setup][0] == "SKIP":
continue
if ((test not in known or results[test] not in known[test][0]) and
(test not in maybe or results[test] not in maybe[test][0])):
unexpected.append(test)
else:
expected.append(test)
print("\nTests with results other than PASS that are expected:")
for test in sorted(expected):
issue_url = 'https://github.com/openzfs/zfs/issues/'
# Include the reason why the result is expected, given the following:
# 1. Suppress test results which set the "Not applicable" reason.
# 2. Numerical reasons are assumed to be GitHub issue numbers.
# 3. When an entire test group is skipped only report the setup reason.
if test in known:
if known[test][1] == na_reason:
continue
elif known[test][1].isdigit():
expect = issue_url + known[test][1]
else:
expect = known[test][1]
elif test in maybe:
if maybe[test][1].isdigit():
expect = issue_url + maybe[test][1]
else:
expect = maybe[test][1]
elif setup in known and known[setup][0] == "SKIP" and setup != test:
continue
elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
continue
else:
expect = "UNKNOWN REASON"
print(" %s %s (%s)" % (results[test], test, expect))
print("\nTests with result of PASS that are unexpected:")
for test in sorted(known.keys()):
# We probably should not be silently ignoring the case
# where "test" is not in "results".
if test not in results or results[test] != "PASS":
continue
print(" %s %s (expected %s)" % (results[test], test,
known[test][0]))
print("\nTests with results other than PASS that are unexpected:")
for test in sorted(unexpected):
expect = "PASS" if test not in known else known[test][0]
print(" %s %s (expected %s)" % (results[test], test, expect))
if len(unexpected) == 0:
sys.exit(0)
else:
sys.exit(1)
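The pattern built in process_results() above is simply the documented "Test:" output format with everything before the functional-test prefix discarded. A small illustration of it in action, assuming a hypothetical install path (the test name is one that appears in the known table):
# Illustration only: apply the script's pattern to one sample result line.
import re

prefix = '/zfs-tests/tests/functional/'
pattern = (r'^Test(?:\s+\(\S+\))?:' +
           r'\s*\S*' + prefix +
           r'(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]')
line = ('Test: /usr/share/zfs/zfs-tests/tests/functional/'
        'casenorm/mixed_none_lookup_ci (run as root) [00:00] [FAIL]')
print(re.match(pattern, line).groups())
# -> ('casenorm/mixed_none_lookup_ci', 'root', '00:00', 'FAIL')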
diff --git a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c
index ab2856b3f7c7..58ea1d07a40b 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c
+++ b/sys/contrib/openzfs/tests/zfs-tests/cmd/mkbusy/mkbusy.c
@@ -1,153 +1,153 @@
/*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*/
/*
* Copyright (c) 2012 by Delphix. All rights reserved.
*/
/*
* Make a directory busy. If the argument is an existing file or directory,
* simply open it directly and pause. If not, verify that the parent directory
* exists, and create a new file in that directory.
*/
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <dirent.h>
#include <strings.h>
#include <stdlib.h>
#include <unistd.h>
#include <errno.h>
#include <string.h>
#include <libzutil.h>
static __attribute__((noreturn)) void
usage(char *progname)
{
(void) fprintf(stderr, "Usage: %s <dirname|filename>\n", progname);
exit(1);
}
static __attribute__((noreturn)) void
fail(char *err)
{
perror(err);
exit(1);
}
static void
daemonize(void)
{
pid_t pid;
if ((pid = fork()) < 0) {
fail("fork");
} else if (pid != 0) {
(void) fprintf(stdout, "%ld\n", (long)pid);
exit(0);
}
(void) setsid();
(void) close(0);
(void) close(1);
(void) close(2);
}
int
main(int argc, char *argv[])
{
int c;
boolean_t isdir = B_FALSE;
struct stat sbuf;
char *fpath = NULL;
char *prog = argv[0];
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
default:
usage(prog);
}
}
argc -= optind;
argv += optind;
if (argc != 1)
usage(prog);
if (stat(argv[0], &sbuf) != 0) {
char *arg;
const char *dname, *fname;
size_t arglen;
ssize_t dnamelen;
/*
* The argument supplied doesn't exist. Copy the path, and
* remove the trailing slash if present.
*/
if ((arg = strdup(argv[0])) == NULL)
fail("strdup");
arglen = strlen(arg);
if (arg[arglen - 1] == '/')
arg[arglen - 1] = '\0';
/* Get the directory and file names. */
fname = zfs_basename(arg);
dname = arg;
if ((dnamelen = zfs_dirnamelen(arg)) != -1)
arg[dnamelen] = '\0';
else
dname = ".";
/* The directory portion of the path must exist */
if (stat(dname, &sbuf) != 0 || !(sbuf.st_mode & S_IFDIR))
usage(prog);
if (asprintf(&fpath, "%s/%s", dname, fname) == -1)
fail("asprintf");
free(arg);
} else
switch (sbuf.st_mode & S_IFMT) {
case S_IFDIR:
isdir = B_TRUE;
- /* FALLTHROUGH */
+ fallthrough;
case S_IFLNK:
case S_IFCHR:
case S_IFBLK:
if ((fpath = strdup(argv[0])) == NULL)
fail("strdup");
break;
default:
usage(prog);
}
if (!isdir) {
int fd;
if ((fd = open(fpath, O_CREAT | O_RDWR, 0600)) < 0)
fail("open");
} else {
DIR *dp;
if ((dp = opendir(fpath)) == NULL)
fail("opendir");
}
free(fpath);
daemonize();
(void) pause();
return (0);
}
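mkbusy keeps a path busy by holding an open descriptor on it (or on a newly created file in an existing parent directory), printing the daemonized pid, and pausing until killed. A rough Python sketch of the same idea, for illustration only; it does not daemonize or reproduce mkbusy's argument handling:
# Hold an open descriptor on the given path so its filesystem stays busy.
import os, signal, sys

path = sys.argv[1] if len(sys.argv) > 1 else "."
fd = os.open(path, os.O_RDONLY)   # works for files and directories alike
print(os.getpid())                # report the pid keeping the path busy
signal.pause()                    # block until a signal arrives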
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib
index c2ccb24384a8..1273ed59df30 100644
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_copies/zfs_copies.kshlib
@@ -1,158 +1,157 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2008 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_copies/zfs_copies.cfg
#
# Compare the value of the copies property with the specified value
# $1, the dataset name
# $2, the expected copies value
#
function cmp_prop
{
typeset ds=$1
typeset val_expect=$2
typeset val_actual
val_actual=$(get_prop copies $ds)
if [[ $val_actual != $val_expect ]]; then
log_fail "Expected value ($val_expect) != actual value " \
"($val_actual)"
fi
}
#
# Check that the used space is charged correctly
# $1, the amount of used space
# $2, the expected common factor between the used space and the file space
#
function check_used
{
typeset charged_spc=$1
typeset -i used
typeset -i expected_cfactor=$2
typeset -i cfactor
typeset -i fsize=${FILESIZE%[m|M]}
((used = $charged_spc / 1024 / 1024))
((cfactor = used / fsize))
if ((cfactor != expected_cfactor)); then
log_fail "The space is not charged correctly while setting" \
"copies as $expected_cfactor."
fi
}
#
# test ncopies on volume
# $1 test type zfs|ufs|ext2
# $2 copies
# $3 mntp for ufs|ext2 test
function do_vol_test
{
typeset type=$1
typeset copies=$2
typeset mntp=$3
vol=$TESTPOOL/$TESTVOL1
vol_b_path=$ZVOL_DEVDIR/$TESTPOOL/$TESTVOL1
- vol_r_path=$ZVOL_RDEVDIR/$TESTPOOL/$TESTVOL1
log_must zfs create -V $VOLSIZE -o copies=$copies $vol
log_must zfs set refreservation=none $vol
- block_device_wait $vol_r_path
+ block_device_wait $vol_b_path
case "$type" in
"ext2")
if is_freebsd; then
log_unsupported "ext2 test not implemented for freebsd"
fi
- log_must eval "new_fs $vol_r_path >/dev/null 2>&1"
+ log_must eval "new_fs $vol_b_path >/dev/null 2>&1"
log_must mount -o rw $vol_b_path $mntp
;;
"ufs")
if is_linux; then
log_unsupported "ufs test not implemented for linux"
fi
- log_must eval "new_fs $vol_r_path >/dev/null 2>&1"
+ log_must eval "new_fs $vol_b_path >/dev/null 2>&1"
log_must mount $vol_b_path $mntp
;;
"zfs")
if is_freebsd; then
# Pool creation on zvols is forbidden by default.
# Save and restore the current setting.
typeset _saved=$(get_tunable VOL_RECURSIVE)
log_must set_tunable64 VOL_RECURSIVE 1 # Allow
zpool create $TESTPOOL1 $vol_b_path
typeset _zpool_create_result=$?
log_must set_tunable64 VOL_RECURSIVE $_saved # Restore
log_must test $_zpool_create_result = 0
else
log_must zpool create $TESTPOOL1 $vol_b_path
fi
log_must zfs create $TESTPOOL1/$TESTFS1
;;
*)
log_unsupported "$type test not implemented"
;;
esac
((nfilesize = copies * ${FILESIZE%m}))
pre_used=$(get_prop used $vol)
((target_size = pre_used + nfilesize))
if [[ $type == "zfs" ]]; then
log_must mkfile $FILESIZE /$TESTPOOL1/$TESTFS1/$FILE
else
log_must mkfile $FILESIZE $mntp/$FILE
fi
post_used=$(get_prop used $vol)
((retries = 0))
while ((post_used < target_size && retries++ < 42)); do
sleep 1
post_used=$(get_prop used $vol)
done
((used = post_used - pre_used))
if ((used < nfilesize)); then
log_fail "The space is not charged correctly while setting" \
"copies as $copies ($used < $nfilesize)" \
"pre=${pre_used} post=${post_used}"
fi
if [[ $type == "zfs" ]]; then
log_must zpool destroy $TESTPOOL1
else
log_must umount $mntp
fi
log_must zfs destroy $vol
}
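check_used() earlier in this library reduces the charged space to whole mebibytes and divides by the file size to recover the copies factor. A worked example of that arithmetic, assuming FILESIZE=10m (the real value comes from zfs_copies.cfg, which is not shown here):
# A 10 MiB file written with copies=2 should charge roughly twice its size.
charged_spc = 2 * 10 * 1024 * 1024   # bytes, as reported by 'get_prop used'
fsize = 10                            # MiB, i.e. ${FILESIZE%[m|M]}
used = charged_spc // 1024 // 1024    # -> 20 (ksh arithmetic is integer too)
cfactor = used // fsize               # -> 2, must match the expected factor
print(used, cfactor)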
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh
index 3ad7d4e80562..4d1605152201 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_006_pos.ksh
@@ -1,85 +1,85 @@
#!/bin/ksh
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2007 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_rename/zfs_rename.kshlib
#
# DESCRIPTION:
# 'zfs rename' can successfully rename a volume snapshot.
#
# STRATEGY:
# 1. Create a snapshot of volume.
# 2. Rename volume snapshot to a new one.
# 3. Rename volume to a new one.
# 4. Create a clone of the snapshot.
# 5. Verify that the rename operations are successful and zfs list can
# list them.
#
###############################################################################
verify_runnable "global"
#
# cleanup defined in zfs_rename.kshlib
#
log_onexit cleanup
log_assert "'zfs rename' can successfully rename a volume snapshot."
vol=$TESTPOOL/$TESTVOL
snap=$TESTSNAP
log_must eval "dd if=$DATA of=$VOL_R_PATH bs=$BS count=$CNT >/dev/null 2>&1"
if ! snapexists $vol@$snap; then
log_must zfs snapshot $vol@$snap
fi
rename_dataset $vol@$snap $vol@${snap}-new
rename_dataset $vol ${vol}-new
rename_dataset ${vol}-new@${snap}-new ${vol}-new@$snap
rename_dataset ${vol}-new $vol
clone=$TESTPOOL/${snap}_clone
create_clone $vol@$snap $clone
-block_device_wait
+block_device_wait $VOLDATA
#verify data integrity
for input in $VOL_R_PATH $ZVOL_RDEVDIR/$clone; do
log_must eval "dd if=$input of=$VOLDATA bs=$BS count=$CNT >/dev/null 2>&1"
if ! cmp_data $VOLDATA $DATA ; then
log_fail "$input gets corrupted after rename operation."
fi
done
destroy_clone $clone
log_must zfs destroy $vol@$snap
log_pass "'zfs rename' can rename volume snapshot as expected."
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata3.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata3.ksh
index 86baf1f6e35d..40b6ca1c1897 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata3.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import_errata3.ksh
@@ -1,103 +1,103 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#
#
# Copyright (c) 2017 Datto, Inc. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# 'zpool import' should import a pool with Errata #3 while preventing
# the user from performing read/write operations
#
# STRATEGY:
# 1. Import a pre-packaged pool with Errata #3
# 2. Attempt to write to the affected datasets
# 3. Attempt to read from the affected datasets
# 4. Attempt to perform a raw send of the affected datasets
# 5. Perform a regular send of the datasets under a new encryption root
# 6. Verify the new datasets can be read from and written to
# 7. Destroy the old affected datasets
# 8. Reimport the pool and verify that the errata is no longer present
#
verify_runnable "global"
POOL_NAME=cryptv0
POOL_FILE=cryptv0.dat
function uncompress_pool
{
log_note "Creating pool from $POOL_FILE"
log_must bzcat \
$STF_SUITE/tests/functional/cli_root/zpool_import/blockfiles/$POOL_FILE.bz2 \
> /$TESTPOOL/$POOL_FILE
return 0
}
function cleanup
{
poolexists $POOL_NAME && log_must zpool destroy $POOL_NAME
[[ -e /$TESTPOOL/$POOL_FILE ]] && rm /$TESTPOOL/$POOL_FILE
return 0
}
log_onexit cleanup
log_assert "Verify that Errata 3 is properly handled"
uncompress_pool
log_must zpool import -d /$TESTPOOL/ $POOL_NAME
log_must eval "zpool status $POOL_NAME | grep -q Errata" # also detects 'Errata #4'
log_must eval "zpool status $POOL_NAME | grep -q ZFS-8000-ER"
log_must eval "echo 'password' | zfs load-key $POOL_NAME/testfs"
log_must eval "echo 'password' | zfs load-key $POOL_NAME/testvol"
log_mustnot zfs mount $POOL_NAME/testfs
log_must zfs mount -o ro $POOL_NAME/testfs
old_mntpnt=$(get_prop mountpoint $POOL_NAME/testfs)
log_must eval "ls $old_mntpnt | grep -q testfile"
-block_device_wait
+block_device_wait /dev/zvol/$POOL_NAME/testvol
log_mustnot dd if=/dev/zero of=/dev/zvol/$POOL_NAME/testvol bs=512 count=1
log_must dd if=/dev/zvol/$POOL_NAME/testvol of=/dev/null bs=512 count=1
log_must zpool set feature@bookmark_v2=enabled $POOL_NAME # necessary for Errata #4
log_must eval "echo 'password' | zfs create \
-o encryption=on -o keyformat=passphrase -o keylocation=prompt \
$POOL_NAME/encroot"
log_mustnot eval "zfs send -w $POOL_NAME/testfs@snap1 | \
zfs recv $POOL_NAME/encroot/testfs"
log_mustnot eval "zfs send -w $POOL_NAME/testvol@snap1 | \
zfs recv $POOL_NAME/encroot/testvol"
log_must eval "zfs send $POOL_NAME/testfs@snap1 | \
zfs recv $POOL_NAME/encroot/testfs"
log_must eval "zfs send $POOL_NAME/testvol@snap1 | \
zfs recv $POOL_NAME/encroot/testvol"
-block_device_wait
+block_device_wait /dev/zvol/$POOL_NAME/encroot/testvol
log_must dd if=/dev/zero of=/dev/zvol/$POOL_NAME/encroot/testvol bs=512 count=1
new_mntpnt=$(get_prop mountpoint $POOL_NAME/encroot/testfs)
log_must eval "ls $new_mntpnt | grep -q testfile"
log_must zfs destroy -r $POOL_NAME/testfs
log_must zfs destroy -r $POOL_NAME/testvol
log_must zpool export $POOL_NAME
log_must zpool import -d /$TESTPOOL/ $POOL_NAME
log_mustnot eval "zpool status $POOL_NAME | grep -q 'Errata #3'"
log_pass "Errata 3 is properly handled"
diff --git a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
index 7451bf8b7a73..322a31e07d89 100755
--- a/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
+++ b/sys/contrib/openzfs/tests/zfs-tests/tests/functional/zvol/zvol_misc/zvol_misc_volmode.ksh
@@ -1,242 +1,244 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/cli_root/zfs_set/zfs_set_common.kshlib
. $STF_SUITE/tests/functional/zvol/zvol_common.shlib
. $STF_SUITE/tests/functional/zvol/zvol_misc/zvol_misc_common.kshlib
#
# DESCRIPTION:
# Verify that ZFS volume property "volmode" works as intended.
#
# STRATEGY:
# 1. Verify "volmode" property does not accept invalid values
# 2. Verify "volmode=none" hides ZVOL device nodes
# 3. Verify "volmode=full" exposes a fully functional device
# 4. Verify "volmode=dev" hides partition info on the device
# 5. Verify "volmode=default" behaves according to the "volmode" module parameter
# 6. Verify "volmode" property is inherited correctly
# 7. Verify "volmode" behaves correctly at import time
# 8. Verify "volmode" behaves according to zvol_inhibit_dev (Linux only)
#
# NOTE: changing volmode may need to remove minors, which could be open, so call
# udev_wait() before we run "zfs set volmode=<value>".
verify_runnable "global"
function cleanup
{
datasetexists $VOLFS && log_must_busy zfs destroy -r $VOLFS
datasetexists $ZVOL && log_must_busy zfs destroy -r $ZVOL
log_must zfs inherit volmode $TESTPOOL
udev_wait
sysctl_inhibit_dev 0
sysctl_volmode 1
udev_cleanup
}
#
# Set zvol_inhibit_dev tunable to $value
#
function sysctl_inhibit_dev # value
{
typeset value="$1"
if is_linux; then
log_note "Setting zvol_inhibit_dev tunable to $value"
log_must set_tunable32 VOL_INHIBIT_DEV $value
fi
}
#
# Set volmode tunable to $value
#
function sysctl_volmode # value
{
typeset value="$1"
log_note "Setting volmode tunable to $value"
log_must set_tunable32 VOL_MODE $value
}
#
# Exercise open and close, read and write operations
#
function test_io # dev
{
typeset dev=$1
log_must dd if=/dev/zero of=$dev count=1
log_must dd if=$dev of=/dev/null count=1
}
log_assert "Verify that ZFS volume property 'volmode' works as intended"
log_onexit cleanup
VOLFS="$TESTPOOL/volfs"
ZVOL="$TESTPOOL/vol"
ZDEV="${ZVOL_DEVDIR}/$ZVOL"
SUBZVOL="$VOLFS/subvol"
SUBZDEV="${ZVOL_DEVDIR}/$SUBZVOL"
log_must zfs create -o mountpoint=none $VOLFS
log_must zfs create -V $VOLSIZE -s $SUBZVOL
log_must zfs create -V $VOLSIZE -s $ZVOL
udev_wait
+blockdev_exists $ZDEV
+blockdev_exists $SUBZDEV
test_io $ZDEV
test_io $SUBZDEV
# 1. Verify "volmode" property does not accept invalid values
typeset badvals=("off" "on" "1" "nope" "-")
for badval in ${badvals[@]}
do
log_mustnot zfs set volmode="$badval" $ZVOL
done
# 2. Verify "volmode=none" hides ZVOL device nodes
log_must zfs set volmode=none $ZVOL
blockdev_missing $ZDEV
log_must_busy zfs destroy $ZVOL
# 3. Verify "volmode=full" exposes a fully functional device
log_must zfs create -V $VOLSIZE -s $ZVOL
udev_wait
log_must zfs set volmode=full $ZVOL
blockdev_exists $ZDEV
test_io $ZDEV
log_must verify_partition $ZDEV
udev_wait
# 3.1 Verify "volmode=geom" is an alias for "volmode=full"
log_must zfs set volmode=geom $ZVOL
blockdev_exists $ZDEV
if [[ "$(get_prop 'volmode' $ZVOL)" != "full" ]]; then
log_fail " Volmode value 'geom' is not an alias for 'full'"
fi
udev_wait
log_must_busy zfs destroy $ZVOL
# 4. Verify "volmode=dev" hides partition info on the device
log_must zfs create -V $VOLSIZE -s $ZVOL
udev_wait
log_must zfs set volmode=dev $ZVOL
blockdev_exists $ZDEV
test_io $ZDEV
log_mustnot verify_partition $ZDEV
udev_wait
log_must_busy zfs destroy $ZVOL
# 5. Verify "volmode=default" behaves according to the "volmode" module parameter
# 5.1 Verify sysctl "volmode=full"
sysctl_volmode 1
log_must zfs create -V $VOLSIZE -s $ZVOL
udev_wait
log_must zfs set volmode=default $ZVOL
blockdev_exists $ZDEV
log_must verify_partition $ZDEV
udev_wait
log_must_busy zfs destroy $ZVOL
# 5.2 Verify sysctl "volmode=dev"
sysctl_volmode 2
log_must zfs create -V $VOLSIZE -s $ZVOL
udev_wait
log_must zfs set volmode=default $ZVOL
blockdev_exists $ZDEV
log_mustnot verify_partition $ZDEV
udev_wait
log_must_busy zfs destroy $ZVOL
# 5.3 Verify sysctl "volmode=none"
sysctl_volmode 3
log_must zfs create -V $VOLSIZE -s $ZVOL
udev_wait
log_must zfs set volmode=default $ZVOL
blockdev_missing $ZDEV
# 6. Verify "volmode" property is inherited correctly
log_must zfs inherit volmode $ZVOL
# 6.1 Check volmode=full case
log_must zfs set volmode=full $TESTPOOL
verify_inherited 'volmode' 'full' $ZVOL $TESTPOOL
blockdev_exists $ZDEV
# 6.2 Check volmode=none case
log_must zfs set volmode=none $TESTPOOL
verify_inherited 'volmode' 'none' $ZVOL $TESTPOOL
blockdev_missing $ZDEV
# 6.3 Check volmode=dev case
log_must zfs set volmode=dev $TESTPOOL
verify_inherited 'volmode' 'dev' $ZVOL $TESTPOOL
blockdev_exists $ZDEV
# 6.4 Check volmode=default case
sysctl_volmode 1
log_must zfs set volmode=default $TESTPOOL
verify_inherited 'volmode' 'default' $ZVOL $TESTPOOL
blockdev_exists $ZDEV
# 6.5 Check inheritance on multiple levels
log_must zfs inherit volmode $SUBZVOL
udev_wait
log_must zfs set volmode=none $VOLFS
udev_wait
log_must zfs set volmode=full $TESTPOOL
verify_inherited 'volmode' 'none' $SUBZVOL $VOLFS
blockdev_missing $SUBZDEV
blockdev_exists $ZDEV
# 7. Verify "volmode" behaves correctly at import time
log_must zpool export $TESTPOOL
blockdev_missing $ZDEV
blockdev_missing $SUBZDEV
log_must zpool import $TESTPOOL
blockdev_exists $ZDEV
blockdev_missing $SUBZDEV
log_must_busy zfs destroy $ZVOL
log_must_busy zfs destroy $SUBZVOL
# 8. Verify "volmode" behaves according to zvol_inhibit_dev (Linux only)
if is_linux; then
sysctl_inhibit_dev 1
# 7.1 Verify device nodes not are not created with "volmode=full"
sysctl_volmode 1
log_must zfs create -V $VOLSIZE -s $ZVOL
blockdev_missing $ZDEV
log_must zfs set volmode=full $ZVOL
blockdev_missing $ZDEV
log_must_busy zfs destroy $ZVOL
# 7.1 Verify device nodes not are not created with "volmode=dev"
sysctl_volmode 2
log_must zfs create -V $VOLSIZE -s $ZVOL
blockdev_missing $ZDEV
log_must zfs set volmode=dev $ZVOL
blockdev_missing $ZDEV
log_must_busy zfs destroy $ZVOL
# 7.1 Verify device nodes not are not created with "volmode=none"
sysctl_volmode 3
log_must zfs create -V $VOLSIZE -s $ZVOL
blockdev_missing $ZDEV
log_must zfs set volmode=none $ZVOL
blockdev_missing $ZDEV
fi
log_pass "Verify that ZFS volume property 'volmode' works as intended"
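As a recap of what the checks above establish, the volmode values map to device behaviour roughly as follows, with "geom" accepted as an alias for "full" and "default" deferring to the volmode tunable (values 1/2/3 respectively). A compact sketch of that mapping, for reference only:
# Behaviour verified by this test for each volmode setting.
VOLMODE = {
    "full":    "device node exposed, partitions visible (tunable 1; 'geom' is an alias)",
    "dev":     "device node exposed, partition info hidden (tunable 2)",
    "none":    "no device node created (tunable 3)",
    "default": "follow the volmode module parameter / sysctl",
}
for mode, behaviour in VOLMODE.items():
    print("%-8s %s" % (mode, behaviour))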
diff --git a/sys/modules/zfs/zfs_config.h b/sys/modules/zfs/zfs_config.h
index ecc1e2940105..82f6d415f966 100644
--- a/sys/modules/zfs/zfs_config.h
+++ b/sys/modules/zfs/zfs_config.h
@@ -1,849 +1,849 @@
/*
* $FreeBSD$
*/
/* zfs_config.h. Generated from zfs_config.h.in by configure. */
/* zfs_config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if translation of program messages to the user's native
language is requested. */
/* #undef ENABLE_NLS */
/* bio_end_io_t wants 1 arg */
/* #undef HAVE_1ARG_BIO_END_IO_T */
/* lookup_bdev() wants 1 arg */
/* #undef HAVE_1ARG_LOOKUP_BDEV */
/* submit_bio() wants 1 arg */
/* #undef HAVE_1ARG_SUBMIT_BIO */
/* bdi_setup_and_register() wants 2 args */
/* #undef HAVE_2ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 2 args */
/* #undef HAVE_2ARGS_VFS_GETATTR */
/* zlib_deflate_workspacesize() wants 2 args */
/* #undef HAVE_2ARGS_ZLIB_DEFLATE_WORKSPACESIZE */
/* bdi_setup_and_register() wants 3 args */
/* #undef HAVE_3ARGS_BDI_SETUP_AND_REGISTER */
/* vfs_getattr wants 3 args */
/* #undef HAVE_3ARGS_VFS_GETATTR */
/* vfs_getattr wants 4 args */
/* #undef HAVE_4ARGS_VFS_GETATTR */
/* kernel has access_ok with 'type' parameter */
/* #undef HAVE_ACCESS_OK_TYPE */
/* posix_acl has refcount_t */
/* #undef HAVE_ACL_REFCOUNT */
/* Define if host toolchain supports AES */
#define HAVE_AES 1
#ifdef __amd64__
#ifndef RESCUE
/* Define if host toolchain supports AVX */
#define HAVE_AVX 1
#endif
/* Define if host toolchain supports AVX2 */
#define HAVE_AVX2 1
/* Define if host toolchain supports AVX512BW */
#define HAVE_AVX512BW 1
/* Define if host toolchain supports AVX512CD */
#define HAVE_AVX512CD 1
/* Define if host toolchain supports AVX512DQ */
#define HAVE_AVX512DQ 1
/* Define if host toolchain supports AVX512ER */
#define HAVE_AVX512ER 1
/* Define if host toolchain supports AVX512F */
#define HAVE_AVX512F 1
/* Define if host toolchain supports AVX512IFMA */
#define HAVE_AVX512IFMA 1
/* Define if host toolchain supports AVX512PF */
#define HAVE_AVX512PF 1
/* Define if host toolchain supports AVX512VBMI */
#define HAVE_AVX512VBMI 1
/* Define if host toolchain supports AVX512VL */
#define HAVE_AVX512VL 1
#endif
/* bdev_check_media_change() exists */
/* #undef HAVE_BDEV_CHECK_MEDIA_CHANGE */
/* bdev_whole() is available */
/* #undef HAVE_BDEV_WHOLE */
/* bio->bi_bdev->bd_disk exists */
/* #undef HAVE_BIO_BDEV_DISK */
/* bio->bi_opf is defined */
/* #undef HAVE_BIO_BI_OPF */
/* bio->bi_status exists */
/* #undef HAVE_BIO_BI_STATUS */
/* bio has bi_iter */
/* #undef HAVE_BIO_BVEC_ITER */
/* bio_*_io_acct() available */
/* #undef HAVE_BIO_IO_ACCT */
/* bio_max_segs() is implemented */
/* #undef HAVE_BIO_MAX_SEGS */
/* bio_set_dev() is available */
/* #undef HAVE_BIO_SET_DEV */
/* bio_set_dev() GPL-only */
/* #undef HAVE_BIO_SET_DEV_GPL_ONLY */
/* bio_set_op_attrs is available */
/* #undef HAVE_BIO_SET_OP_ATTRS */
/* blkdev_reread_part() exists */
/* #undef HAVE_BLKDEV_REREAD_PART */
/* blkg_tryget() is available */
/* #undef HAVE_BLKG_TRYGET */
/* blkg_tryget() GPL-only */
/* #undef HAVE_BLKG_TRYGET_GPL_ONLY */
/* blk_alloc_disk() exists */
/* #undef HAVE_BLK_ALLOC_DISK */
/* blk_alloc_queue() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN */
/* blk_alloc_queue_rh() expects request function */
/* #undef HAVE_BLK_ALLOC_QUEUE_REQUEST_FN_RH */
/* blk queue backing_dev_info is dynamic */
/* #undef HAVE_BLK_QUEUE_BDI_DYNAMIC */
/* blk_queue_flag_clear() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_CLEAR */
/* blk_queue_flag_set() exists */
/* #undef HAVE_BLK_QUEUE_FLAG_SET */
/* blk_queue_flush() is available */
/* #undef HAVE_BLK_QUEUE_FLUSH */
/* blk_queue_flush() is GPL-only */
/* #undef HAVE_BLK_QUEUE_FLUSH_GPL_ONLY */
/* blk_queue_secdiscard() is available */
/* #undef HAVE_BLK_QUEUE_SECDISCARD */
/* blk_queue_secure_erase() is available */
/* #undef HAVE_BLK_QUEUE_SECURE_ERASE */
/* blk_queue_write_cache() exists */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE */
/* blk_queue_write_cache() is GPL-only */
/* #undef HAVE_BLK_QUEUE_WRITE_CACHE_GPL_ONLY */
/* Define if revalidate_disk() in block_device_operations */
/* #undef HAVE_BLOCK_DEVICE_OPERATIONS_REVALIDATE_DISK */
/* Define to 1 if you have the Mac OS X function CFLocaleCopyCurrent in the
CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYCURRENT */
/* Define to 1 if you have the Mac OS X function
CFLocaleCopyPreferredLanguages in the CoreFoundation framework. */
/* #undef HAVE_CFLOCALECOPYPREFERREDLANGUAGES */
/* Define to 1 if you have the Mac OS X function CFPreferencesCopyAppValue in
the CoreFoundation framework. */
/* #undef HAVE_CFPREFERENCESCOPYAPPVALUE */
/* check_disk_change() exists */
/* #undef HAVE_CHECK_DISK_CHANGE */
/* clear_inode() is available */
/* #undef HAVE_CLEAR_INODE */
/* dentry uses const struct dentry_operations */
/* #undef HAVE_CONST_DENTRY_OPERATIONS */
/* copy_from_iter() is available */
/* #undef HAVE_COPY_FROM_ITER */
/* copy_to_iter() is available */
/* #undef HAVE_COPY_TO_ITER */
/* yes */
/* #undef HAVE_CPU_HOTPLUG */
/* current_time() exists */
/* #undef HAVE_CURRENT_TIME */
/* Define if the GNU dcgettext() function is already present or preinstalled.
*/
/* #undef HAVE_DCGETTEXT */
/* DECLARE_EVENT_CLASS() is available */
/* #undef HAVE_DECLARE_EVENT_CLASS */
/* lookup_bdev() wants dev_t arg */
/* #undef HAVE_DEVT_LOOKUP_BDEV */
/* sops->dirty_inode() wants flags */
/* #undef HAVE_DIRTY_INODE_WITH_FLAGS */
/* disk_*_io_acct() available */
/* #undef HAVE_DISK_IO_ACCT */
/* Define to 1 if you have the <dlfcn.h> header file. */
#define HAVE_DLFCN_H 1
/* d_make_root() is available */
/* #undef HAVE_D_MAKE_ROOT */
/* d_prune_aliases() is available */
/* #undef HAVE_D_PRUNE_ALIASES */
/* dops->d_revalidate() operation takes nameidata */
/* #undef HAVE_D_REVALIDATE_NAMEIDATA */
/* eops->encode_fh() wants child and parent inodes */
/* #undef HAVE_ENCODE_FH_WITH_INODE */
/* sops->evict_inode() exists */
/* #undef HAVE_EVICT_INODE */
/* fops->aio_fsync() exists */
/* #undef HAVE_FILE_AIO_FSYNC */
/* file_dentry() is available */
/* #undef HAVE_FILE_DENTRY */
/* file_inode() is available */
/* #undef HAVE_FILE_INODE */
/* iops->follow_link() cookie */
/* #undef HAVE_FOLLOW_LINK_COOKIE */
/* iops->follow_link() nameidata */
/* #undef HAVE_FOLLOW_LINK_NAMEIDATA */
/* fops->fsync() with range */
/* #undef HAVE_FSYNC_RANGE */
/* fops->fsync() without dentry */
/* #undef HAVE_FSYNC_WITHOUT_DENTRY */
/* generic_fillattr requires struct user_namespace* */
/* #undef HAVE_GENERIC_FILLATTR_USERNS */
/* generic_*_io_acct() 3 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_3ARG */
/* generic_*_io_acct() 4 arg available */
/* #undef HAVE_GENERIC_IO_ACCT_4ARG */
/* generic_readlink is global */
/* #undef HAVE_GENERIC_READLINK */
/* generic_setxattr() exists */
/* #undef HAVE_GENERIC_SETXATTR */
/* generic_write_checks() takes kiocb */
/* #undef HAVE_GENERIC_WRITE_CHECKS_KIOCB */
/* Define if the GNU gettext() function is already present or preinstalled. */
/* #undef HAVE_GETTEXT */
/* iops->get_link() cookie */
/* #undef HAVE_GET_LINK_COOKIE */
/* iops->get_link() delayed */
/* #undef HAVE_GET_LINK_DELAYED */
/* group_info->gid exists */
/* #undef HAVE_GROUP_INFO_GID */
/* has_capability() is available */
/* #undef HAVE_HAS_CAPABILITY */
/* Define if you have the iconv() function and it works. */
#define HAVE_ICONV 1
/* yes */
/* #undef HAVE_INODE_LOCK_SHARED */
/* inode_owner_or_capable() exists */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE */
/* inode_owner_or_capable() takes user_ns */
/* #undef HAVE_INODE_OWNER_OR_CAPABLE_IDMAPPED */
/* inode_set_flags() exists */
/* #undef HAVE_INODE_SET_FLAGS */
/* inode_set_iversion() exists */
/* #undef HAVE_INODE_SET_IVERSION */
/* inode->i_*time's are timespec64 */
/* #undef HAVE_INODE_TIMESPEC64_TIMES */
/* timestamp_truncate() exists */
/* #undef HAVE_INODE_TIMESTAMP_TRUNCATE */
/* Define to 1 if you have the <inttypes.h> header file. */
#define HAVE_INTTYPES_H 1
/* in_compat_syscall() is available */
/* #undef HAVE_IN_COMPAT_SYSCALL */
/* iops->create() takes struct user_namespace* */
/* #undef HAVE_IOPS_CREATE_USERNS */
/* iops->mkdir() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKDIR_USERNS */
/* iops->mknod() takes struct user_namespace* */
/* #undef HAVE_IOPS_MKNOD_USERNS */
/* iops->rename() takes struct user_namespace* */
/* #undef HAVE_IOPS_RENAME_USERNS */
/* iops->symlink() takes struct user_namespace* */
/* #undef HAVE_IOPS_SYMLINK_USERNS */
/* iov_iter_advance() is available */
/* #undef HAVE_IOV_ITER_ADVANCE */
/* iov_iter_count() is available */
/* #undef HAVE_IOV_ITER_COUNT */
/* iov_iter_fault_in_readable() is available */
/* #undef HAVE_IOV_ITER_FAULT_IN_READABLE */
/* iov_iter_revert() is available */
/* #undef HAVE_IOV_ITER_REVERT */
/* iov_iter types are available */
/* #undef HAVE_IOV_ITER_TYPES */
/* yes */
/* #undef HAVE_IO_SCHEDULE_TIMEOUT */
/* Define to 1 if you have the `issetugid' function. */
#define HAVE_ISSETUGID 1
/* kernel has kernel_fpu_* functions */
/* #undef HAVE_KERNEL_FPU */
/* kernel has asm/fpu/api.h */
/* #undef HAVE_KERNEL_FPU_API_HEADER */
/* kernel fpu internal */
/* #undef HAVE_KERNEL_FPU_INTERNAL */
/* uncached_acl_sentinel() exists */
/* #undef HAVE_KERNEL_GET_ACL_HANDLE_CACHE */
/* kernel does stack verification */
/* #undef HAVE_KERNEL_OBJTOOL */
/* kernel has linux/objtool.h */
/* #undef HAVE_KERNEL_OBJTOOL_HEADER */
/* kernel_read() take loff_t pointer */
/* #undef HAVE_KERNEL_READ_PPOS */
/* timer_list.function gets a timer_list */
/* #undef HAVE_KERNEL_TIMER_FUNCTION_TIMER_LIST */
/* struct timer_list has a flags member */
/* #undef HAVE_KERNEL_TIMER_LIST_FLAGS */
/* timer_setup() is available */
/* #undef HAVE_KERNEL_TIMER_SETUP */
/* kernel_write() take loff_t pointer */
/* #undef HAVE_KERNEL_WRITE_PPOS */
/* kmem_cache_create_usercopy() exists */
/* #undef HAVE_KMEM_CACHE_CREATE_USERCOPY */
/* kstrtoul() exists */
/* #undef HAVE_KSTRTOUL */
/* ktime_get_coarse_real_ts64() exists */
/* #undef HAVE_KTIME_GET_COARSE_REAL_TS64 */
/* ktime_get_raw_ts64() exists */
/* #undef HAVE_KTIME_GET_RAW_TS64 */
/* kvmalloc exists */
/* #undef HAVE_KVMALLOC */
/* Define if you have [aio] */
/* #undef HAVE_LIBAIO */
/* Define if you have [blkid] */
/* #undef HAVE_LIBBLKID */
/* Define if you have [crypto] */
#define HAVE_LIBCRYPTO 1
/* Define if you have [tirpc] */
/* #undef HAVE_LIBTIRPC */
/* Define if you have [udev] */
/* #undef HAVE_LIBUDEV */
/* Define if you have [uuid] */
/* #undef HAVE_LIBUUID */
/* lseek_execute() is available */
/* #undef HAVE_LSEEK_EXECUTE */
/* makedev() is declared in sys/mkdev.h */
/* #undef HAVE_MAKEDEV_IN_MKDEV */
/* makedev() is declared in sys/sysmacros.h */
/* #undef HAVE_MAKEDEV_IN_SYSMACROS */
/* Noting that make_request_fn() returns blk_qc_t */
/* #undef HAVE_MAKE_REQUEST_FN_RET_QC */
/* Noting that make_request_fn() returns void */
/* #undef HAVE_MAKE_REQUEST_FN_RET_VOID */
/* Define to 1 if you have the <memory.h> header file. */
#define HAVE_MEMORY_H 1
/* iops->mkdir() takes umode_t */
/* #undef HAVE_MKDIR_UMODE_T */
/* Define to 1 if you have the `mlockall' function. */
#define HAVE_MLOCKALL 1
/* lookup_bdev() wants mode arg */
/* #undef HAVE_MODE_LOOKUP_BDEV */
/* Define if host toolchain supports MOVBE */
#define HAVE_MOVBE 1
/* new_sync_read()/new_sync_write() are available */
/* #undef HAVE_NEW_SYNC_READ */
/* iops->getattr() takes a path */
/* #undef HAVE_PATH_IOPS_GETATTR */
/* Define if host toolchain supports PCLMULQDQ */
#define HAVE_PCLMULQDQ 1
/* percpu_counter_add_batch() is defined */
/* #undef HAVE_PERCPU_COUNTER_ADD_BATCH */
/* percpu_counter_init() wants gfp_t */
/* #undef HAVE_PERCPU_COUNTER_INIT_WITH_GFP */
/* posix_acl_chmod() exists */
/* #undef HAVE_POSIX_ACL_CHMOD */
/* posix_acl_from_xattr() needs user_ns */
/* #undef HAVE_POSIX_ACL_FROM_XATTR_USERNS */
/* posix_acl_release() is available */
/* #undef HAVE_POSIX_ACL_RELEASE */
/* posix_acl_release() is GPL-only */
/* #undef HAVE_POSIX_ACL_RELEASE_GPL_ONLY */
/* posix_acl_valid() wants user namespace */
/* #undef HAVE_POSIX_ACL_VALID_WITH_NS */
/* proc_ops structure exists */
/* #undef HAVE_PROC_OPS_STRUCT */
/* iops->put_link() cookie */
/* #undef HAVE_PUT_LINK_COOKIE */
/* iops->put_link() delayed */
/* #undef HAVE_PUT_LINK_DELAYED */
/* iops->put_link() nameidata */
/* #undef HAVE_PUT_LINK_NAMEIDATA */
/* If available, contains the Python version number currently in use. */
#define HAVE_PYTHON "3.7"
/* qat is enabled and exists */
/* #undef HAVE_QAT */
/* iops->rename() wants flags */
/* #undef HAVE_RENAME_WANTS_FLAGS */
/* REQ_DISCARD is defined */
/* #undef HAVE_REQ_DISCARD */
/* REQ_FLUSH is defined */
/* #undef HAVE_REQ_FLUSH */
/* REQ_OP_DISCARD is defined */
/* #undef HAVE_REQ_OP_DISCARD */
/* REQ_OP_FLUSH is defined */
/* #undef HAVE_REQ_OP_FLUSH */
/* REQ_OP_SECURE_ERASE is defined */
/* #undef HAVE_REQ_OP_SECURE_ERASE */
/* REQ_PREFLUSH is defined */
/* #undef HAVE_REQ_PREFLUSH */
/* revalidate_disk() is available */
/* #undef HAVE_REVALIDATE_DISK */
/* revalidate_disk_size() is available */
/* #undef HAVE_REVALIDATE_DISK_SIZE */
/* struct rw_semaphore has member activity */
/* #undef HAVE_RWSEM_ACTIVITY */
/* struct rw_semaphore has atomic_long_t member count */
/* #undef HAVE_RWSEM_ATOMIC_LONG_COUNT */
/* linux/sched/signal.h exists */
/* #undef HAVE_SCHED_SIGNAL_HEADER */
/* Define to 1 if you have the <security/pam_modules.h> header file. */
#define HAVE_SECURITY_PAM_MODULES_H 1
/* setattr_prepare() is available, doesn't accept user_namespace */
/* #undef HAVE_SETATTR_PREPARE_NO_USERNS */
/* setattr_prepare() accepts user_namespace */
/* #undef HAVE_SETATTR_PREPARE_USERNS */
/* iops->set_acl() exists, takes 3 args */
/* #undef HAVE_SET_ACL */
/* iops->set_acl() takes 4 args */
/* #undef HAVE_SET_ACL_USERNS */
/* set_cached_acl() is usable */
/* #undef HAVE_SET_CACHED_ACL_USABLE */
/* set_special_state() exists */
/* #undef HAVE_SET_SPECIAL_STATE */
/* struct shrink_control exists */
/* #undef HAVE_SHRINK_CONTROL_STRUCT */
/* kernel_siginfo_t exists */
/* #undef HAVE_SIGINFO */
/* signal_stop() exists */
/* #undef HAVE_SIGNAL_STOP */
/* new shrinker callback wants 2 args */
/* #undef HAVE_SINGLE_SHRINKER_CALLBACK */
/* ->count_objects exists */
/* #undef HAVE_SPLIT_SHRINKER_CALLBACK */
#if defined(__amd64__) || defined(__i386__)
/* Define if host toolchain supports SSE */
#define HAVE_SSE 1
/* Define if host toolchain supports SSE2 */
#define HAVE_SSE2 1
/* Define if host toolchain supports SSE3 */
#define HAVE_SSE3 1
/* Define if host toolchain supports SSE4.1 */
#define HAVE_SSE4_1 1
/* Define if host toolchain supports SSE4.2 */
#define HAVE_SSE4_2 1
/* Define if host toolchain supports SSSE3 */
#define HAVE_SSSE3 1
#endif
/* STACK_FRAME_NON_STANDARD is defined */
/* #undef HAVE_STACK_FRAME_NON_STANDARD */
/* Define to 1 if you have the <stdint.h> header file. */
#define HAVE_STDINT_H 1
/* Define to 1 if you have the <stdlib.h> header file. */
#define HAVE_STDLIB_H 1
/* Define to 1 if you have the <strings.h> header file. */
#define HAVE_STRINGS_H 1
/* Define to 1 if you have the <string.h> header file. */
#define HAVE_STRING_H 1
/* Define to 1 if you have the `strlcat' function. */
#define HAVE_STRLCAT 1
/* Define to 1 if you have the `strlcpy' function. */
#define HAVE_STRLCPY 1
/* submit_bio is member of struct block_device_operations */
/* #undef HAVE_SUBMIT_BIO_IN_BLOCK_DEVICE_OPERATIONS */
/* super_setup_bdi_name() exists */
/* #undef HAVE_SUPER_SETUP_BDI_NAME */
/* super_block->s_user_ns exists */
/* #undef HAVE_SUPER_USER_NS */
/* Define to 1 if you have the <sys/stat.h> header file. */
#define HAVE_SYS_STAT_H 1
/* Define to 1 if you have the <sys/types.h> header file. */
#define HAVE_SYS_TYPES_H 1
/* i_op->tmpfile() exists */
/* #undef HAVE_TMPFILE */
/* i_op->tmpfile() has userns */
/* #undef HAVE_TMPFILE_USERNS */
/* totalhigh_pages() exists */
/* #undef HAVE_TOTALHIGH_PAGES */
/* kernel has totalram_pages() */
/* #undef HAVE_TOTALRAM_PAGES_FUNC */
/* Define to 1 if you have the `udev_device_get_is_initialized' function. */
/* #undef HAVE_UDEV_DEVICE_GET_IS_INITIALIZED */
/* kernel has __kernel_fpu_* functions */
/* #undef HAVE_UNDERSCORE_KERNEL_FPU */
/* Define to 1 if you have the <unistd.h> header file. */
#define HAVE_UNISTD_H 1
/* iops->getattr() takes struct user_namespace* */
/* #undef HAVE_USERNS_IOPS_GETATTR */
/* iops->getattr() takes a vfsmount */
/* #undef HAVE_VFSMOUNT_IOPS_GETATTR */
/* aops->direct_IO() uses iovec */
/* #undef HAVE_VFS_DIRECT_IO_IOVEC */
/* aops->direct_IO() uses iov_iter without rw */
/* #undef HAVE_VFS_DIRECT_IO_ITER */
/* aops->direct_IO() uses iov_iter with offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_OFFSET */
/* aops->direct_IO() uses iov_iter with rw and offset */
/* #undef HAVE_VFS_DIRECT_IO_ITER_RW_OFFSET */
/* All required iov_iter interfaces are available */
/* #undef HAVE_VFS_IOV_ITER */
/* fops->iterate() is available */
/* #undef HAVE_VFS_ITERATE */
/* fops->iterate_shared() is available */
/* #undef HAVE_VFS_ITERATE_SHARED */
/* fops->readdir() is available */
/* #undef HAVE_VFS_READDIR */
/* fops->read/write_iter() are available */
/* #undef HAVE_VFS_RW_ITERATE */
/* __set_page_dirty_nobuffers exists */
/* #undef HAVE_VFS_SET_PAGE_DIRTY_NOBUFFERS */
/* __vmalloc page flags exists */
/* #undef HAVE_VMALLOC_PAGE_KERNEL */
/* yes */
/* #undef HAVE_WAIT_ON_BIT_ACTION */
/* wait_queue_entry_t exists */
/* #undef HAVE_WAIT_QUEUE_ENTRY_T */
/* wq_head->head and wq_entry->entry exist */
/* #undef HAVE_WAIT_QUEUE_HEAD_ENTRY */
/* xattr_handler->get() wants dentry */
/* #undef HAVE_XATTR_GET_DENTRY */
/* xattr_handler->get() wants both dentry and inode */
/* #undef HAVE_XATTR_GET_DENTRY_INODE */
/* xattr_handler->get() wants xattr_handler */
/* #undef HAVE_XATTR_GET_HANDLER */
/* xattr_handler has name */
/* #undef HAVE_XATTR_HANDLER_NAME */
/* xattr_handler->list() wants dentry */
/* #undef HAVE_XATTR_LIST_DENTRY */
/* xattr_handler->list() wants xattr_handler */
/* #undef HAVE_XATTR_LIST_HANDLER */
/* xattr_handler->list() wants simple */
/* #undef HAVE_XATTR_LIST_SIMPLE */
/* xattr_handler->set() wants dentry */
/* #undef HAVE_XATTR_SET_DENTRY */
/* xattr_handler->set() wants both dentry and inode */
/* #undef HAVE_XATTR_SET_DENTRY_INODE */
/* xattr_handler->set() wants xattr_handler */
/* #undef HAVE_XATTR_SET_HANDLER */
/* xattr_handler->set() takes user_namespace */
/* #undef HAVE_XATTR_SET_USERNS */
/* Define to 1 if you have the zlib library (-lz). */
#define HAVE_ZLIB 1
/* __posix_acl_chmod() exists */
/* #undef HAVE___POSIX_ACL_CHMOD */
/* kernel exports FPU functions */
/* #undef KERNEL_EXPORTS_X86_FPU */
/* TBD: fetch(3) support */
#if 0
/* whether the chosen libfetch is to be loaded at run-time */
#define LIBFETCH_DYNAMIC 1
/* libfetch is fetch(3) */
#define LIBFETCH_IS_FETCH 1
/* libfetch is libcurl */
#define LIBFETCH_IS_LIBCURL 0
/* soname of chosen libfetch */
#define LIBFETCH_SONAME "libfetch.so.6"
#endif
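/*
 * Illustrative sketch, not part of the generated zfs_config.h: when
 * LIBFETCH_DYNAMIC is set, the chosen transfer library (fetch(3) here, with
 * LIBFETCH_SONAME "libfetch.so.6") is meant to be opened at run time rather
 * than linked directly.  A minimal dlopen() pattern under that assumption,
 * using a hypothetical helper name:
 */
#include <dlfcn.h>
#include <stddef.h>

static void *
open_fetch_library(void)
{
#if defined(LIBFETCH_DYNAMIC) && defined(LIBFETCH_SONAME)
	/* resolve symbols lazily and keep them out of the global namespace */
	return (dlopen(LIBFETCH_SONAME, RTLD_LAZY | RTLD_LOCAL));
#else
	return (NULL);	/* linked at build time or fetch support disabled */
#endif
}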
/* Define to the sub-directory where libtool stores uninstalled libraries. */
#define LT_OBJDIR ".libs/"
/* make_request_fn() return type */
/* #undef MAKE_REQUEST_FN_RET */
/* hardened module_param_call */
/* #undef MODULE_PARAM_CALL_CONST */
/* struct shrink_control has nid */
/* #undef SHRINK_CONTROL_HAS_NID */
/* Defined for legacy compatibility. */
#define SPL_META_ALIAS ZFS_META_ALIAS
/* Defined for legacy compatibility. */
#define SPL_META_RELEASE ZFS_META_RELEASE
/* Defined for legacy compatibility. */
#define SPL_META_VERSION ZFS_META_VERSION
/* True if ZFS is to be compiled for a FreeBSD system */
#define SYSTEM_FREEBSD 1
/* True if ZFS is to be compiled for a Linux system */
/* #undef SYSTEM_LINUX */
/* zfs debugging enabled */
/* #undef ZFS_DEBUG */
/* /dev/zfs minor */
/* #undef ZFS_DEVICE_MINOR */
/* enum node_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_FILE_PAGES */
/* enum node_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum node_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_NODE_STAT_ITEM_NR_INACTIVE_FILE */
/* enum zone_stat_item contains NR_FILE_PAGES */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_FILE_PAGES */
/* enum zone_stat_item contains NR_INACTIVE_ANON */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_ANON */
/* enum zone_stat_item contains NR_INACTIVE_FILE */
/* #undef ZFS_ENUM_ZONE_STAT_ITEM_NR_INACTIVE_FILE */
/* global_node_page_state() exists */
/* #undef ZFS_GLOBAL_NODE_PAGE_STATE */
/* global_zone_page_state() exists */
/* #undef ZFS_GLOBAL_ZONE_PAGE_STATE */
/* Define to 1 if GPL-only symbols can be used */
/* #undef ZFS_IS_GPL_COMPATIBLE */
/* Define the project alias string. */
-#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_g3b89d9518"
+#define ZFS_META_ALIAS "zfs-2.1.99-FreeBSD_g4a1195ca5"
/* Define the project author. */
#define ZFS_META_AUTHOR "OpenZFS"
/* Define the project release date. */
/* #undef ZFS_META_DATA */
/* Define the maximum compatible kernel version. */
#define ZFS_META_KVER_MAX "5.13"
/* Define the minimum compatible kernel version. */
#define ZFS_META_KVER_MIN "3.10"
/* Define the project license. */
#define ZFS_META_LICENSE "CDDL"
/* Define the libtool library 'age' version information. */
/* #undef ZFS_META_LT_AGE */
/* Define the libtool library 'current' version information. */
/* #undef ZFS_META_LT_CURRENT */
/* Define the libtool library 'revision' version information. */
/* #undef ZFS_META_LT_REVISION */
/* Define the project name. */
#define ZFS_META_NAME "zfs"
/* Define the project release. */
-#define ZFS_META_RELEASE "FreeBSD_g3b89d9518"
+#define ZFS_META_RELEASE "FreeBSD_g4a1195ca5"
/* Define the project version. */
#define ZFS_META_VERSION "2.1.99"
/* count is located in percpu_ref.data */
/* #undef ZFS_PERCPU_REF_COUNT_IN_DATA */
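/*
 * Illustrative note, not part of the generated zfs_config.h: judging from
 * the values above, ZFS_META_ALIAS is simply the name, version and release
 * strings joined with dashes ("zfs" "-" "2.1.99" "-" "FreeBSD_g4a1195ca5").
 * A quick sanity check of that relationship, assuming nothing beyond the
 * macros defined above:
 */
#include <assert.h>
#include <string.h>

static void
check_meta_alias(void)
{
	assert(strcmp(ZFS_META_ALIAS,
	    ZFS_META_NAME "-" ZFS_META_VERSION "-" ZFS_META_RELEASE) == 0);
}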
diff --git a/sys/modules/zfs/zfs_gitrev.h b/sys/modules/zfs/zfs_gitrev.h
index ff8688315330..1af08380dca4 100644
--- a/sys/modules/zfs/zfs_gitrev.h
+++ b/sys/modules/zfs/zfs_gitrev.h
@@ -1,5 +1,5 @@
/*
* $FreeBSD$
*/
-#define ZFS_META_GITREV "zfs-2.1.99-430-g3b89d9518"
+#define ZFS_META_GITREV "zfs-2.1.99-453-g4a1195ca50"
